2024-11-16 19:26:14,013 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-16 19:26:14,025 main DEBUG Took 0.010242 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-16 19:26:14,026 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-16 19:26:14,026 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-16 19:26:14,027 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-16 19:26:14,029 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 19:26:14,036 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-16 19:26:14,048 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 19:26:14,049 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 19:26:14,050 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 19:26:14,050 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 19:26:14,051 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 19:26:14,051 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 19:26:14,052 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 19:26:14,052 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 19:26:14,053 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 19:26:14,053 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 19:26:14,054 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 19:26:14,054 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 19:26:14,055 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 19:26:14,055 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-16 19:26:14,056 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 19:26:14,056 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 19:26:14,057 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 19:26:14,057 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 19:26:14,057 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 19:26:14,058 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 19:26:14,058 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 19:26:14,059 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 19:26:14,059 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 19:26:14,059 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 19:26:14,060 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 19:26:14,060 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-16 19:26:14,061 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 19:26:14,063 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-16 19:26:14,064 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-16 19:26:14,065 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-16 19:26:14,066 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-16 19:26:14,067 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-16 19:26:14,074 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-16 19:26:14,077 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-16 19:26:14,078 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-16 19:26:14,079 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-16 19:26:14,079 main DEBUG createAppenders(={Console}) 2024-11-16 19:26:14,080 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-16 19:26:14,080 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-16 19:26:14,080 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-16 19:26:14,081 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-16 19:26:14,081 main DEBUG OutputStream closed 2024-11-16 19:26:14,081 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-16 19:26:14,082 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-16 19:26:14,082 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-16 19:26:14,153 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-16 19:26:14,155 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-16 19:26:14,156 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-16 19:26:14,157 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-16 19:26:14,157 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-16 19:26:14,158 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-16 19:26:14,158 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-16 19:26:14,158 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-16 19:26:14,159 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-16 19:26:14,159 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-16 19:26:14,159 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-16 19:26:14,160 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-16 19:26:14,160 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-16 19:26:14,160 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-16 19:26:14,161 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-16 19:26:14,161 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-16 19:26:14,161 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-16 19:26:14,162 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-16 19:26:14,164 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-16 19:26:14,164 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-16 19:26:14,165 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-16 19:26:14,165 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-16T19:26:14,397 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996 2024-11-16 19:26:14,399 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-16 19:26:14,399 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
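The DEBUG lines above trace Log4j 2 building a PropertiesConfiguration from the log4j2.properties bundled in hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar: a single HBaseTestAppender named Console writing to SYSTEM_ERR with the ISO8601 pattern layout, a root logger at INFO routed to Console, and a set of per-package level overrides. As a minimal sketch, the properties file that produces this output plausibly looks like the snippet below; the values (levels, pattern, target, maxSize) are taken from the builder output above, while the key names simply follow standard Log4j 2 properties syntax and are assumptions, not copied from the actual file.

```properties
# Sketch reconstructed from the PropertiesConfiguration builder output above.
# Values come from the log; key names are standard Log4j 2 properties syntax.
status = debug

appender.console.type = HBaseTestAppender
appender.console.name = Console
appender.console.target = SYSTEM_ERR
appender.console.maxSize = 1G
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

# Matches levelAndRefs="INFO,Console" in the RootLogger builder line.
rootLogger = INFO,Console

# Per-package overrides seen in the LoggerConfig$Builder lines, for example:
logger.hbase.name = org.apache.hadoop.hbase
logger.hbase.level = DEBUG
logger.hadoop.name = org.apache.hadoop
logger.hadoop.level = WARN
logger.zookeeper.name = org.apache.zookeeper
logger.zookeeper.level = ERROR
logger.directory.name = org.apache.directory
logger.directory.level = WARN
logger.directory.additivity = false
# The remaining loggers (MBeans, MetricsConfig, RSRpcServices, FailedServers,
# ScheduledChore, TestJul2Slf4j, netty channel) follow the same pattern.
```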
2024-11-16T19:26:14,408 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-16T19:26:14,441 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=378, ProcessCount=11, AvailableMemoryMB=3611 2024-11-16T19:26:14,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T19:26:14,463 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/cluster_3002c0b6-5f11-156d-a8b9-03e0a7e62c10, deleteOnExit=true 2024-11-16T19:26:14,464 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T19:26:14,465 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/test.cache.data in system properties and HBase conf 2024-11-16T19:26:14,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T19:26:14,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/hadoop.log.dir in system properties and HBase conf 2024-11-16T19:26:14,468 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T19:26:14,469 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T19:26:14,470 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T19:26:14,581 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-16T19:26:14,674 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T19:26:14,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T19:26:14,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T19:26:14,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T19:26:14,681 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T19:26:14,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T19:26:14,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T19:26:14,683 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T19:26:14,684 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T19:26:14,685 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T19:26:14,686 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/nfs.dump.dir in system properties and HBase conf 2024-11-16T19:26:14,687 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/java.io.tmpdir in system properties and HBase conf 2024-11-16T19:26:14,687 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T19:26:14,688 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T19:26:14,689 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T19:26:15,247 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T19:26:15,533 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-16T19:26:15,608 INFO [Time-limited test {}] log.Log(170): Logging initialized @2261ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-16T19:26:15,679 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:26:15,747 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:26:15,767 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:26:15,767 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:26:15,768 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T19:26:15,779 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:26:15,782 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:26:15,783 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:26:15,956 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/java.io.tmpdir/jetty-localhost-42161-hadoop-hdfs-3_4_1-tests_jar-_-any-6539123712868348615/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T19:26:15,964 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:42161} 2024-11-16T19:26:15,965 INFO [Time-limited test {}] server.Server(415): Started @2619ms 2024-11-16T19:26:15,994 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T19:26:16,335 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:26:16,346 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:26:16,350 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:26:16,351 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:26:16,351 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T19:26:16,353 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:26:16,354 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:26:16,485 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/java.io.tmpdir/jetty-localhost-38299-hadoop-hdfs-3_4_1-tests_jar-_-any-8309621425134713269/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:26:16,486 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:38299} 2024-11-16T19:26:16,486 INFO [Time-limited test {}] server.Server(415): Started @3140ms 2024-11-16T19:26:16,556 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T19:26:16,731 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:26:16,749 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:26:16,761 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:26:16,762 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:26:16,762 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T19:26:16,765 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:26:16,766 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:26:16,910 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/java.io.tmpdir/jetty-localhost-39741-hadoop-hdfs-3_4_1-tests_jar-_-any-4759611871914293014/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:26:16,911 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:39741} 2024-11-16T19:26:16,911 INFO [Time-limited test {}] server.Server(415): Started @3565ms 2024-11-16T19:26:16,914 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
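Everything from the HBaseTestingUtil lines at 19:26:14 onward is the standard mini-cluster bring-up described by the option string StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...}: system properties are redirected into the per-test data directory, then an embedded HDFS (one namenode plus the two datanode Jetty servers above) and a mini ZooKeeper are started. As a rough illustration of the API that produces this output, a test set-up typically looks like the hedged sketch below; method names follow the HBase 3.x test utility API and this is not the literal TestLogRolling set-up code.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

// Hedged sketch of a mini-cluster bring-up matching the option string logged above;
// illustrative only, not the actual TestLogRolling code.
public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();

    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)          // one HMaster
        .numRegionServers(1)    // one RegionServer
        .numDataNodes(2)        // two HDFS DataNodes (the two datanode Jetty servers above)
        .numZkServers(1)        // one embedded ZooKeeper
        .build();

    util.startMiniCluster(option);   // starts DFS, ZooKeeper, master and region server
    try {
      // run test logic against util.getConnection() / util.getConfiguration()
    } finally {
      util.shutdownMiniCluster();    // tears everything down and removes the test dirs
    }
  }
}
```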
2024-11-16T19:26:17,055 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/cluster_3002c0b6-5f11-156d-a8b9-03e0a7e62c10/data/data1/current/BP-1946553720-172.17.0.2-1731785175328/current, will proceed with Du for space computation calculation, 2024-11-16T19:26:17,055 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/cluster_3002c0b6-5f11-156d-a8b9-03e0a7e62c10/data/data3/current/BP-1946553720-172.17.0.2-1731785175328/current, will proceed with Du for space computation calculation, 2024-11-16T19:26:17,055 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/cluster_3002c0b6-5f11-156d-a8b9-03e0a7e62c10/data/data2/current/BP-1946553720-172.17.0.2-1731785175328/current, will proceed with Du for space computation calculation, 2024-11-16T19:26:17,057 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/cluster_3002c0b6-5f11-156d-a8b9-03e0a7e62c10/data/data4/current/BP-1946553720-172.17.0.2-1731785175328/current, will proceed with Du for space computation calculation, 2024-11-16T19:26:17,125 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T19:26:17,126 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T19:26:17,206 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2769f57d73af4864 with lease ID 0x8bf680363d41472c: Processing first storage report for DS-81e019ed-0d69-4373-add4-83a5cd5128da from datanode DatanodeRegistration(127.0.0.1:39641, datanodeUuid=114de53b-21e8-42bb-a5d2-50fe379859ed, infoPort=41507, infoSecurePort=0, ipcPort=40331, storageInfo=lv=-57;cid=testClusterID;nsid=1990074821;c=1731785175328) 2024-11-16T19:26:17,208 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2769f57d73af4864 with lease ID 0x8bf680363d41472c: from storage DS-81e019ed-0d69-4373-add4-83a5cd5128da node DatanodeRegistration(127.0.0.1:39641, datanodeUuid=114de53b-21e8-42bb-a5d2-50fe379859ed, infoPort=41507, infoSecurePort=0, ipcPort=40331, storageInfo=lv=-57;cid=testClusterID;nsid=1990074821;c=1731785175328), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T19:26:17,208 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeee425f2a97c76f0 with lease ID 0x8bf680363d41472d: Processing first storage report for DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62 from datanode DatanodeRegistration(127.0.0.1:41287, datanodeUuid=840e330b-ce22-48ad-9b96-5a95a2354a56, infoPort=41019, infoSecurePort=0, ipcPort=46761, storageInfo=lv=-57;cid=testClusterID;nsid=1990074821;c=1731785175328) 2024-11-16T19:26:17,209 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeee425f2a97c76f0 with lease ID 0x8bf680363d41472d: from storage DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62 node DatanodeRegistration(127.0.0.1:41287, datanodeUuid=840e330b-ce22-48ad-9b96-5a95a2354a56, infoPort=41019, infoSecurePort=0, ipcPort=46761, storageInfo=lv=-57;cid=testClusterID;nsid=1990074821;c=1731785175328), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:26:17,209 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2769f57d73af4864 with lease ID 0x8bf680363d41472c: Processing first storage report for DS-d245b3d4-53bd-4432-9d88-4d69ca53d35f from datanode DatanodeRegistration(127.0.0.1:39641, datanodeUuid=114de53b-21e8-42bb-a5d2-50fe379859ed, infoPort=41507, infoSecurePort=0, ipcPort=40331, storageInfo=lv=-57;cid=testClusterID;nsid=1990074821;c=1731785175328) 2024-11-16T19:26:17,209 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2769f57d73af4864 with lease ID 0x8bf680363d41472c: from storage DS-d245b3d4-53bd-4432-9d88-4d69ca53d35f node DatanodeRegistration(127.0.0.1:39641, datanodeUuid=114de53b-21e8-42bb-a5d2-50fe379859ed, infoPort=41507, infoSecurePort=0, ipcPort=40331, storageInfo=lv=-57;cid=testClusterID;nsid=1990074821;c=1731785175328), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:26:17,209 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeee425f2a97c76f0 with lease ID 0x8bf680363d41472d: Processing first storage report for DS-c7e28214-aab1-4c45-861f-cbfdf1c6f724 from datanode DatanodeRegistration(127.0.0.1:41287, datanodeUuid=840e330b-ce22-48ad-9b96-5a95a2354a56, infoPort=41019, infoSecurePort=0, ipcPort=46761, storageInfo=lv=-57;cid=testClusterID;nsid=1990074821;c=1731785175328) 2024-11-16T19:26:17,210 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xeee425f2a97c76f0 with lease ID 0x8bf680363d41472d: from storage DS-c7e28214-aab1-4c45-861f-cbfdf1c6f724 node DatanodeRegistration(127.0.0.1:41287, datanodeUuid=840e330b-ce22-48ad-9b96-5a95a2354a56, infoPort=41019, infoSecurePort=0, ipcPort=46761, storageInfo=lv=-57;cid=testClusterID;nsid=1990074821;c=1731785175328), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:26:17,269 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996 2024-11-16T19:26:17,338 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/cluster_3002c0b6-5f11-156d-a8b9-03e0a7e62c10/zookeeper_0, clientPort=55736, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/cluster_3002c0b6-5f11-156d-a8b9-03e0a7e62c10/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/cluster_3002c0b6-5f11-156d-a8b9-03e0a7e62c10/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T19:26:17,347 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55736 2024-11-16T19:26:17,359 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:26:17,363 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:26:17,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741825_1001 (size=7) 2024-11-16T19:26:17,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741825_1001 (size=7) 2024-11-16T19:26:17,998 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a with version=8 2024-11-16T19:26:17,999 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/hbase-staging 2024-11-16T19:26:18,082 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-16T19:26:18,275 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d11ab77873cb:0 server-side Connection retries=45 2024-11-16T19:26:18,284 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:26:18,284 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T19:26:18,288 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T19:26:18,288 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:26:18,289 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T19:26:18,408 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T19:26:18,458 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-16T19:26:18,465 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-16T19:26:18,468 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T19:26:18,489 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 88690 (auto-detected) 2024-11-16T19:26:18,490 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-16T19:26:18,505 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41597 2024-11-16T19:26:18,523 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41597 connecting to ZooKeeper ensemble=127.0.0.1:55736 2024-11-16T19:26:18,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:415970x0, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T19:26:18,552 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41597-0x1004a0015560000 connected 2024-11-16T19:26:18,579 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:26:18,582 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:26:18,592 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:26:18,596 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a, hbase.cluster.distributed=false 2024-11-16T19:26:18,618 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T19:26:18,622 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41597 2024-11-16T19:26:18,623 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41597 2024-11-16T19:26:18,623 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41597 2024-11-16T19:26:18,624 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41597 2024-11-16T19:26:18,624 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41597 2024-11-16T19:26:18,731 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d11ab77873cb:0 server-side Connection retries=45 2024-11-16T19:26:18,733 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:26:18,733 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T19:26:18,733 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T19:26:18,733 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:26:18,734 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T19:26:18,737 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T19:26:18,740 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T19:26:18,742 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34913 2024-11-16T19:26:18,744 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34913 connecting to ZooKeeper ensemble=127.0.0.1:55736 2024-11-16T19:26:18,746 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:26:18,752 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:26:18,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:349130x0, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T19:26:18,761 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:349130x0, quorum=127.0.0.1:55736, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:26:18,761 DEBUG 
[zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34913-0x1004a0015560001 connected 2024-11-16T19:26:18,765 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T19:26:18,772 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T19:26:18,775 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34913-0x1004a0015560001, quorum=127.0.0.1:55736, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T19:26:18,780 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34913-0x1004a0015560001, quorum=127.0.0.1:55736, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T19:26:18,780 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34913 2024-11-16T19:26:18,781 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34913 2024-11-16T19:26:18,781 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34913 2024-11-16T19:26:18,782 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34913 2024-11-16T19:26:18,782 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34913 2024-11-16T19:26:18,795 DEBUG [M:0;d11ab77873cb:41597 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d11ab77873cb:41597 2024-11-16T19:26:18,796 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d11ab77873cb,41597,1731785178126 2024-11-16T19:26:18,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34913-0x1004a0015560001, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:26:18,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:26:18,804 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d11ab77873cb,41597,1731785178126 2024-11-16T19:26:18,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:26:18,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34913-0x1004a0015560001, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T19:26:18,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34913-0x1004a0015560001, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-16T19:26:18,823 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T19:26:18,825 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d11ab77873cb,41597,1731785178126 from backup master directory 2024-11-16T19:26:18,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34913-0x1004a0015560001, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:26:18,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d11ab77873cb,41597,1731785178126 2024-11-16T19:26:18,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:26:18,828 WARN [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T19:26:18,829 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d11ab77873cb,41597,1731785178126 2024-11-16T19:26:18,831 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-16T19:26:18,832 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-16T19:26:18,892 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/hbase.id] with ID: 42c13b42-1693-4a59-b3fa-2f412a370d6f 2024-11-16T19:26:18,892 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/.tmp/hbase.id 2024-11-16T19:26:18,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741826_1002 (size=42) 2024-11-16T19:26:18,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741826_1002 (size=42) 2024-11-16T19:26:18,907 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/.tmp/hbase.id]:[hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/hbase.id] 2024-11-16T19:26:18,949 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:26:18,955 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] 
util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T19:26:18,975 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-11-16T19:26:18,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:26:18,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34913-0x1004a0015560001, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:26:18,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741827_1003 (size=196) 2024-11-16T19:26:18,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741827_1003 (size=196) 2024-11-16T19:26:19,009 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T19:26:19,010 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T19:26:19,016 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T19:26:19,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741828_1004 (size=1189) 2024-11-16T19:26:19,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741828_1004 (size=1189) 2024-11-16T19:26:19,062 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY 
=> ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store 2024-11-16T19:26:19,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741829_1005 (size=34) 2024-11-16T19:26:19,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741829_1005 (size=34) 2024-11-16T19:26:19,088 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-16T19:26:19,092 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:26:19,094 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T19:26:19,094 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:26:19,094 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:26:19,096 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T19:26:19,096 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:26:19,097 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
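The long descriptor dumped at 19:26:19,009 and again at 19:26:19,062 is the schema of the master's local 'master:store' region (column families info, proc, rs and state). For readers less used to the flattened descriptor format, the printed 'info' family attributes correspond roughly to the builder calls below in the public client API; this is only an illustrative mapping of the logged values, not code taken from MasterRegion.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative mapping of the printed 'master:store' descriptor onto the public
// builder API; attribute values are taken from the log line above.
public class MasterStoreDescriptorSketch {
  static TableDescriptor sketch() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                     // VERSIONS => '3'
            .setInMemory(true)                                     // IN_MEMORY => 'true'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
            .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
            .setBlocksize(8 * 1024)                                // BLOCKSIZE => '8192 B (8KB)'
            .build())
        // proc, rs and state use the defaults shown in the log:
        // VERSIONS 1, BLOOMFILTER ROW, BLOCKSIZE 64KB, no data block encoding.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
  }
}
```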
2024-11-16T19:26:19,098 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731785179094Disabling compacts and flushes for region at 1731785179094Disabling writes for close at 1731785179096 (+2 ms)Writing region close event to WAL at 1731785179097 (+1 ms)Closed at 1731785179097 2024-11-16T19:26:19,101 WARN [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/.initializing 2024-11-16T19:26:19,101 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/WALs/d11ab77873cb,41597,1731785178126 2024-11-16T19:26:19,124 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d11ab77873cb%2C41597%2C1731785178126, suffix=, logDir=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/WALs/d11ab77873cb,41597,1731785178126, archiveDir=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/oldWALs, maxLogs=10 2024-11-16T19:26:19,134 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C41597%2C1731785178126.1731785179129 2024-11-16T19:26:19,156 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/WALs/d11ab77873cb,41597,1731785178126/d11ab77873cb%2C41597%2C1731785178126.1731785179129 2024-11-16T19:26:19,163 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41507:41507),(127.0.0.1/127.0.0.1:41019:41019)] 2024-11-16T19:26:19,164 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T19:26:19,165 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:26:19,168 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:26:19,170 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:26:19,202 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:26:19,225 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T19:26:19,228 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:26:19,230 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:26:19,231 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:26:19,234 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T19:26:19,234 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:26:19,235 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:26:19,235 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:26:19,239 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T19:26:19,239 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:26:19,240 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:26:19,240 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:26:19,242 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T19:26:19,243 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:26:19,243 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:26:19,244 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:26:19,247 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:26:19,249 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:26:19,254 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:26:19,255 DEBUG 
[master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:26:19,259 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T19:26:19,264 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:26:19,269 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T19:26:19,271 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=769459, jitterRate=-0.021582365036010742}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T19:26:19,278 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731785179181Initializing all the Stores at 1731785179184 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785179184Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785179185 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785179185Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785179185Cleaning up temporary data from old regions at 1731785179255 (+70 ms)Region opened successfully at 1731785179278 (+23 ms) 2024-11-16T19:26:19,279 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T19:26:19,312 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75c85d2b, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d11ab77873cb/172.17.0.2:0 2024-11-16T19:26:19,339 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T19:26:19,349 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T19:26:19,349 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T19:26:19,352 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T19:26:19,353 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-16T19:26:19,358 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-16T19:26:19,358 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T19:26:19,387 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T19:26:19,395 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T19:26:19,397 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T19:26:19,399 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T19:26:19,401 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T19:26:19,402 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T19:26:19,404 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T19:26:19,408 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T19:26:19,409 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T19:26:19,410 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily 
an error) 2024-11-16T19:26:19,411 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T19:26:19,427 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T19:26:19,428 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T19:26:19,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34913-0x1004a0015560001, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T19:26:19,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T19:26:19,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34913-0x1004a0015560001, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:26:19,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:26:19,435 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d11ab77873cb,41597,1731785178126, sessionid=0x1004a0015560000, setting cluster-up flag (Was=false) 2024-11-16T19:26:19,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:26:19,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34913-0x1004a0015560001, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:26:19,452 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T19:26:19,454 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d11ab77873cb,41597,1731785178126 2024-11-16T19:26:19,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:26:19,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34913-0x1004a0015560001, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:26:19,462 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T19:26:19,464 DEBUG 
[master/d11ab77873cb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d11ab77873cb,41597,1731785178126 2024-11-16T19:26:19,470 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T19:26:19,486 INFO [RS:0;d11ab77873cb:34913 {}] regionserver.HRegionServer(746): ClusterId : 42c13b42-1693-4a59-b3fa-2f412a370d6f 2024-11-16T19:26:19,489 DEBUG [RS:0;d11ab77873cb:34913 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T19:26:19,493 DEBUG [RS:0;d11ab77873cb:34913 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T19:26:19,493 DEBUG [RS:0;d11ab77873cb:34913 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T19:26:19,496 DEBUG [RS:0;d11ab77873cb:34913 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T19:26:19,496 DEBUG [RS:0;d11ab77873cb:34913 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b889f05, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d11ab77873cb/172.17.0.2:0 2024-11-16T19:26:19,509 DEBUG [RS:0;d11ab77873cb:34913 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d11ab77873cb:34913 2024-11-16T19:26:19,512 INFO [RS:0;d11ab77873cb:34913 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T19:26:19,512 INFO [RS:0;d11ab77873cb:34913 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T19:26:19,512 DEBUG [RS:0;d11ab77873cb:34913 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-16T19:26:19,515 INFO [RS:0;d11ab77873cb:34913 {}] regionserver.HRegionServer(2659): reportForDuty to master=d11ab77873cb,41597,1731785178126 with port=34913, startcode=1731785178695 2024-11-16T19:26:19,525 DEBUG [RS:0;d11ab77873cb:34913 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T19:26:19,536 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T19:26:19,544 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T19:26:19,549 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-16T19:26:19,555 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d11ab77873cb,41597,1731785178126 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T19:26:19,563 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:26:19,563 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:26:19,563 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:26:19,563 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:26:19,563 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d11ab77873cb:0, corePoolSize=10, maxPoolSize=10 2024-11-16T19:26:19,564 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:26:19,564 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d11ab77873cb:0, corePoolSize=2, maxPoolSize=2 2024-11-16T19:26:19,564 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:26:19,566 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731785209566 2024-11-16T19:26:19,568 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T19:26:19,569 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T19:26:19,570 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T19:26:19,571 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T19:26:19,573 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T19:26:19,574 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T19:26:19,574 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T19:26:19,575 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T19:26:19,577 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:26:19,577 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T19:26:19,577 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-16T19:26:19,581 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T19:26:19,583 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T19:26:19,583 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T19:26:19,586 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T19:26:19,586 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T19:26:19,593 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.large.0-1731785179587,5,FailOnTimeoutGroup] 2024-11-16T19:26:19,594 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34447, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T19:26:19,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741831_1007 (size=1321) 2024-11-16T19:26:19,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741831_1007 (size=1321) 2024-11-16T19:26:19,597 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T19:26:19,597 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.small.0-1731785179593,5,FailOnTimeoutGroup] 2024-11-16T19:26:19,597 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T19:26:19,597 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
2024-11-16T19:26:19,597 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a 2024-11-16T19:26:19,598 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T19:26:19,599 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-16T19:26:19,603 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41597 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d11ab77873cb,34913,1731785178695 2024-11-16T19:26:19,606 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41597 {}] master.ServerManager(517): Registering regionserver=d11ab77873cb,34913,1731785178695 2024-11-16T19:26:19,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741832_1008 (size=32) 2024-11-16T19:26:19,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741832_1008 (size=32) 2024-11-16T19:26:19,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:26:19,617 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T19:26:19,620 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T19:26:19,620 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:26:19,622 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:26:19,622 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T19:26:19,625 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, 
region 1588230740 columnFamilyName ns 2024-11-16T19:26:19,625 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:26:19,625 DEBUG [RS:0;d11ab77873cb:34913 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a 2024-11-16T19:26:19,625 DEBUG [RS:0;d11ab77873cb:34913 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33243 2024-11-16T19:26:19,625 DEBUG [RS:0;d11ab77873cb:34913 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T19:26:19,626 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:26:19,626 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T19:26:19,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T19:26:19,630 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T19:26:19,630 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:26:19,630 DEBUG [RS:0;d11ab77873cb:34913 {}] zookeeper.ZKUtil(111): regionserver:34913-0x1004a0015560001, quorum=127.0.0.1:55736, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d11ab77873cb,34913,1731785178695 2024-11-16T19:26:19,631 WARN [RS:0;d11ab77873cb:34913 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-16T19:26:19,631 INFO [RS:0;d11ab77873cb:34913 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T19:26:19,631 DEBUG [RS:0;d11ab77873cb:34913 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695 2024-11-16T19:26:19,632 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:26:19,632 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T19:26:19,633 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d11ab77873cb,34913,1731785178695] 2024-11-16T19:26:19,635 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T19:26:19,635 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:26:19,636 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:26:19,637 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T19:26:19,638 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/hbase/meta/1588230740 2024-11-16T19:26:19,639 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/hbase/meta/1588230740 2024-11-16T19:26:19,642 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T19:26:19,642 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T19:26:19,644 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-16T19:26:19,646 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T19:26:19,650 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T19:26:19,652 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=753470, jitterRate=-0.04191340506076813}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T19:26:19,657 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731785179614Initializing all the Stores at 1731785179616 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785179616Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785179617 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785179617Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785179617Cleaning up temporary data from old regions at 1731785179643 (+26 ms)Region opened successfully at 1731785179657 (+14 ms) 2024-11-16T19:26:19,657 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T19:26:19,658 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T19:26:19,658 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T19:26:19,658 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T19:26:19,658 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T19:26:19,659 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T19:26:19,660 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731785179657Disabling compacts and flushes for region at 1731785179657Disabling writes for close at 1731785179658 (+1 
ms)Writing region close event to WAL at 1731785179659 (+1 ms)Closed at 1731785179659 2024-11-16T19:26:19,661 INFO [RS:0;d11ab77873cb:34913 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T19:26:19,664 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T19:26:19,664 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T19:26:19,673 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T19:26:19,681 INFO [RS:0;d11ab77873cb:34913 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T19:26:19,684 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T19:26:19,687 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T19:26:19,687 INFO [RS:0;d11ab77873cb:34913 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T19:26:19,687 INFO [RS:0;d11ab77873cb:34913 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:26:19,688 INFO [RS:0;d11ab77873cb:34913 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T19:26:19,695 INFO [RS:0;d11ab77873cb:34913 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T19:26:19,696 INFO [RS:0;d11ab77873cb:34913 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-16T19:26:19,697 DEBUG [RS:0;d11ab77873cb:34913 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:26:19,697 DEBUG [RS:0;d11ab77873cb:34913 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:26:19,697 DEBUG [RS:0;d11ab77873cb:34913 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:26:19,697 DEBUG [RS:0;d11ab77873cb:34913 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:26:19,697 DEBUG [RS:0;d11ab77873cb:34913 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:26:19,697 DEBUG [RS:0;d11ab77873cb:34913 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d11ab77873cb:0, corePoolSize=2, maxPoolSize=2 2024-11-16T19:26:19,698 DEBUG [RS:0;d11ab77873cb:34913 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:26:19,698 DEBUG [RS:0;d11ab77873cb:34913 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:26:19,698 DEBUG [RS:0;d11ab77873cb:34913 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:26:19,698 DEBUG [RS:0;d11ab77873cb:34913 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:26:19,698 DEBUG [RS:0;d11ab77873cb:34913 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:26:19,698 DEBUG [RS:0;d11ab77873cb:34913 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:26:19,698 DEBUG [RS:0;d11ab77873cb:34913 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d11ab77873cb:0, corePoolSize=3, maxPoolSize=3 2024-11-16T19:26:19,699 DEBUG [RS:0;d11ab77873cb:34913 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0, corePoolSize=3, maxPoolSize=3 2024-11-16T19:26:19,699 INFO [RS:0;d11ab77873cb:34913 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T19:26:19,700 INFO [RS:0;d11ab77873cb:34913 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T19:26:19,700 INFO [RS:0;d11ab77873cb:34913 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:26:19,700 INFO [RS:0;d11ab77873cb:34913 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-16T19:26:19,700 INFO [RS:0;d11ab77873cb:34913 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T19:26:19,700 INFO [RS:0;d11ab77873cb:34913 {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,34913,1731785178695-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T19:26:19,716 INFO [RS:0;d11ab77873cb:34913 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T19:26:19,718 INFO [RS:0;d11ab77873cb:34913 {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,34913,1731785178695-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:26:19,718 INFO [RS:0;d11ab77873cb:34913 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:26:19,718 INFO [RS:0;d11ab77873cb:34913 {}] regionserver.Replication(171): d11ab77873cb,34913,1731785178695 started 2024-11-16T19:26:19,738 INFO [RS:0;d11ab77873cb:34913 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:26:19,739 INFO [RS:0;d11ab77873cb:34913 {}] regionserver.HRegionServer(1482): Serving as d11ab77873cb,34913,1731785178695, RpcServer on d11ab77873cb/172.17.0.2:34913, sessionid=0x1004a0015560001 2024-11-16T19:26:19,740 DEBUG [RS:0;d11ab77873cb:34913 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T19:26:19,740 DEBUG [RS:0;d11ab77873cb:34913 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d11ab77873cb,34913,1731785178695 2024-11-16T19:26:19,740 DEBUG [RS:0;d11ab77873cb:34913 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd11ab77873cb,34913,1731785178695' 2024-11-16T19:26:19,741 DEBUG [RS:0;d11ab77873cb:34913 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T19:26:19,742 DEBUG [RS:0;d11ab77873cb:34913 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T19:26:19,742 DEBUG [RS:0;d11ab77873cb:34913 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T19:26:19,743 DEBUG [RS:0;d11ab77873cb:34913 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T19:26:19,743 DEBUG [RS:0;d11ab77873cb:34913 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d11ab77873cb,34913,1731785178695 2024-11-16T19:26:19,743 DEBUG [RS:0;d11ab77873cb:34913 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd11ab77873cb,34913,1731785178695' 2024-11-16T19:26:19,743 DEBUG [RS:0;d11ab77873cb:34913 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T19:26:19,744 DEBUG [RS:0;d11ab77873cb:34913 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T19:26:19,744 DEBUG [RS:0;d11ab77873cb:34913 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T19:26:19,745 INFO [RS:0;d11ab77873cb:34913 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T19:26:19,745 INFO [RS:0;d11ab77873cb:34913 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-16T19:26:19,838 WARN [d11ab77873cb:41597 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-16T19:26:19,855 INFO [RS:0;d11ab77873cb:34913 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d11ab77873cb%2C34913%2C1731785178695, suffix=, logDir=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695, archiveDir=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/oldWALs, maxLogs=32 2024-11-16T19:26:19,860 INFO [RS:0;d11ab77873cb:34913 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C34913%2C1731785178695.1731785179859 2024-11-16T19:26:19,868 INFO [RS:0;d11ab77873cb:34913 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785179859 2024-11-16T19:26:19,870 DEBUG [RS:0;d11ab77873cb:34913 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41019:41019),(127.0.0.1/127.0.0.1:41507:41507)] 2024-11-16T19:26:20,094 DEBUG [d11ab77873cb:41597 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T19:26:20,109 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d11ab77873cb,34913,1731785178695 2024-11-16T19:26:20,114 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d11ab77873cb,34913,1731785178695, state=OPENING 2024-11-16T19:26:20,118 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T19:26:20,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:26:20,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34913-0x1004a0015560001, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:26:20,120 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:26:20,121 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:26:20,122 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T19:26:20,124 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=d11ab77873cb,34913,1731785178695}] 2024-11-16T19:26:20,301 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T19:26:20,305 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58137, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T19:26:20,316 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T19:26:20,317 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T19:26:20,321 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d11ab77873cb%2C34913%2C1731785178695.meta, suffix=.meta, logDir=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695, archiveDir=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/oldWALs, maxLogs=32 2024-11-16T19:26:20,323 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C34913%2C1731785178695.meta.1731785180323.meta 2024-11-16T19:26:20,331 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.meta.1731785180323.meta 2024-11-16T19:26:20,335 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41019:41019),(127.0.0.1/127.0.0.1:41507:41507)] 2024-11-16T19:26:20,336 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T19:26:20,338 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T19:26:20,340 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T19:26:20,345 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
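The entries above show the meta region loading org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from its table descriptor (HTD). For hbase:meta that wiring is built in, but the same mechanism is available through the public client API; the sketch below is illustrative only (the "demo" table and "cf" family are invented names, and exact builder signatures can vary slightly between HBase 2.x and 3.x).

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorTableSketch {
      // Builds a descriptor for a hypothetical "demo" table that attaches the same
      // endpoint class the log shows being loaded for hbase:meta.
      public static TableDescriptor withMultiRowMutation() throws IOException {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
      }
    }
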
2024-11-16T19:26:20,349 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T19:26:20,349 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:26:20,350 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T19:26:20,350 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T19:26:20,353 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T19:26:20,355 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T19:26:20,355 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:26:20,356 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:26:20,357 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T19:26:20,358 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T19:26:20,359 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:26:20,360 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:26:20,360 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T19:26:20,362 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T19:26:20,362 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:26:20,363 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:26:20,363 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T19:26:20,365 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T19:26:20,365 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:26:20,366 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
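The store-opening entries above, together with the region open journal further down, reflect the column-family attributes of hbase:meta: ROW_INDEX_V1 block encoding, ROWCOL bloom filters, in-memory blocks, 8 KB block size, 3 versions, no compression. hbase:meta's descriptor is created internally, but the same attributes are expressed on user tables with ColumnFamilyDescriptorBuilder; a minimal sketch, not taken from this test:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeFamilySketch {
      // Mirrors the attributes printed for the meta 'info' family in the log above.
      public static ColumnFamilyDescriptor infoLikeFamily() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setMaxVersions(3)
            .build();
      }
    }
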
2024-11-16T19:26:20,366 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T19:26:20,368 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/hbase/meta/1588230740 2024-11-16T19:26:20,371 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/hbase/meta/1588230740 2024-11-16T19:26:20,374 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T19:26:20,374 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T19:26:20,375 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T19:26:20,378 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T19:26:20,380 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=750967, jitterRate=-0.04509700834751129}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T19:26:20,380 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T19:26:20,381 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731785180350Writing region info on filesystem at 1731785180350Initializing all the Stores at 1731785180352 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785180352Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785180352Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785180352Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785180352Cleaning up temporary data from old regions at 1731785180374 (+22 ms)Running coprocessor post-open hooks at 1731785180380 (+6 ms)Region opened successfully at 1731785180381 (+1 ms) 2024-11-16T19:26:20,387 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731785180294 2024-11-16T19:26:20,397 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T19:26:20,398 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T19:26:20,399 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=d11ab77873cb,34913,1731785178695 2024-11-16T19:26:20,401 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d11ab77873cb,34913,1731785178695, state=OPEN 2024-11-16T19:26:20,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T19:26:20,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34913-0x1004a0015560001, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T19:26:20,404 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:26:20,404 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:26:20,404 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d11ab77873cb,34913,1731785178695 2024-11-16T19:26:20,410 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T19:26:20,410 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d11ab77873cb,34913,1731785178695 in 281 msec 2024-11-16T19:26:20,419 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T19:26:20,419 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 740 msec 2024-11-16T19:26:20,420 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T19:26:20,421 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T19:26:20,439 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T19:26:20,441 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d11ab77873cb,34913,1731785178695, seqNum=-1] 2024-11-16T19:26:20,464 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T19:26:20,466 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37135, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T19:26:20,486 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 987 msec 2024-11-16T19:26:20,486 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731785180486, completionTime=-1 2024-11-16T19:26:20,488 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T19:26:20,489 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T19:26:20,512 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T19:26:20,512 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731785240512 2024-11-16T19:26:20,512 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731785300512 2024-11-16T19:26:20,513 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 23 msec 2024-11-16T19:26:20,515 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,41597,1731785178126-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:26:20,516 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,41597,1731785178126-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:26:20,516 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,41597,1731785178126-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:26:20,517 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d11ab77873cb:41597, period=300000, unit=MILLISECONDS is enabled. 
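InitMetaProcedure above creates the built-in 'default' and 'hbase' namespaces as the last step of meta initialization. Application namespaces are created the same way through the Admin API; the sketch below is a generic example (the "testns" name is invented, not part of this test), shown only to connect the log line to the client-side call.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // The master pre-creates 'default' and 'hbase'; user namespaces go through
          // the same create path on the master.
          admin.createNamespace(NamespaceDescriptor.create("testns").build());
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println("namespace: " + ns.getName());
          }
        }
      }
    }
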
2024-11-16T19:26:20,518 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T19:26:20,518 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T19:26:20,524 DEBUG [master/d11ab77873cb:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T19:26:20,546 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.717sec 2024-11-16T19:26:20,547 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T19:26:20,549 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T19:26:20,550 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T19:26:20,550 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T19:26:20,550 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T19:26:20,551 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,41597,1731785178126-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T19:26:20,551 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,41597,1731785178126-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T19:26:20,560 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T19:26:20,561 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T19:26:20,561 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,41597,1731785178126-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
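Both the regionserver and, above, the master report "Quota support disabled" because quotas are off by default. For reference only, the standard switch is the hbase.quota.enabled configuration key; this suite simply leaves it unset, so the sketch below shows what a quota-enabled setup would change, not what this test does.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaConfigSketch {
      public static Configuration withQuotas() {
        Configuration conf = HBaseConfiguration.create();
        // With this key set before cluster startup, MasterQuotaManager and
        // RegionServerRpcQuotaManager start instead of logging "Quota support disabled".
        conf.setBoolean("hbase.quota.enabled", true);
        return conf;
      }
    }
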
2024-11-16T19:26:20,595 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b598c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T19:26:20,597 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-16T19:26:20,598 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-16T19:26:20,601 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d11ab77873cb,41597,-1 for getting cluster id 2024-11-16T19:26:20,603 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T19:26:20,611 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '42c13b42-1693-4a59-b3fa-2f412a370d6f' 2024-11-16T19:26:20,614 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T19:26:20,614 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "42c13b42-1693-4a59-b3fa-2f412a370d6f" 2024-11-16T19:26:20,615 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c9f8695, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T19:26:20,615 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d11ab77873cb,41597,-1] 2024-11-16T19:26:20,617 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T19:26:20,619 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:26:20,621 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59896, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T19:26:20,624 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36e15a4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T19:26:20,624 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T19:26:20,631 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d11ab77873cb,34913,1731785178695, seqNum=-1] 2024-11-16T19:26:20,632 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T19:26:20,634 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35406, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T19:26:20,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=d11ab77873cb,41597,1731785178126 2024-11-16T19:26:20,652 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:26:20,659 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T19:26:20,663 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T19:26:20,668 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is d11ab77873cb,41597,1731785178126 2024-11-16T19:26:20,671 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6059cfed 2024-11-16T19:26:20,672 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T19:26:20,675 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59902, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T19:26:20,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41597 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T19:26:20,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41597 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
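The two TableDescriptorChecker warnings are expected in a log-rolling test: MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) are made deliberately tiny so a handful of writes is enough to force flushes and WAL rolls, and the balancer is switched off so the single region stays put. A hedged sketch of how such values could be expressed with the public API follows; the real test may instead set the equivalent hbase.hregion.* configuration keys named in the warnings.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SmallSizesSketch {
      // Same values the checker warns about above.
      static final long MAX_FILESIZE = 786432L;      // "hbase.hregion.max.filesize"
      static final long MEMSTORE_FLUSHSIZE = 8192L;  // "hbase.hregion.memstore.flush.size"

      public static TableDescriptor descriptor() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setMaxFileSize(MAX_FILESIZE)
            .setMemStoreFlushSize(MEMSTORE_FLUSHSIZE)
            .build();
      }

      public static void disableBalancer(Admin admin) throws IOException {
        // Matches "Client=null/null set balanceSwitch=false" a few lines above.
        admin.balancerSwitch(false, true);
      }
    }
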
2024-11-16T19:26:20,682 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41597 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T19:26:20,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41597 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-16T19:26:20,694 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T19:26:20,696 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41597 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-16T19:26:20,696 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:26:20,699 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T19:26:20,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41597 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T19:26:20,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741835_1011 (size=389) 2024-11-16T19:26:20,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741835_1011 (size=389) 2024-11-16T19:26:20,748 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c55727d527c475aa76261629fa04dca7, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a 2024-11-16T19:26:20,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741836_1012 (size=72) 2024-11-16T19:26:20,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741836_1012 (size=72) 2024-11-16T19:26:20,761 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:26:20,761 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing c55727d527c475aa76261629fa04dca7, disabling compactions & flushes 2024-11-16T19:26:20,762 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7. 2024-11-16T19:26:20,762 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7. 2024-11-16T19:26:20,762 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7. after waiting 0 ms 2024-11-16T19:26:20,762 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7. 2024-11-16T19:26:20,762 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7. 2024-11-16T19:26:20,762 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for c55727d527c475aa76261629fa04dca7: Waiting for close lock at 1731785180761Disabling compacts and flushes for region at 1731785180761Disabling writes for close at 1731785180762 (+1 ms)Writing region close event to WAL at 1731785180762Closed at 1731785180762 2024-11-16T19:26:20,764 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T19:26:20,768 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731785180764"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731785180764"}]},"ts":"1731785180764"} 2024-11-16T19:26:20,773 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
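The entries above are the master-side CreateTableProcedure (pid=4) writing the new region for TestLogRolling-testSlowSyncLogRolling into hbase:meta. On the client side that whole sequence is driven by a single Admin call; a minimal sketch, assuming the descriptor shape shown in the create request above (the asynchronous variant createTableAsync exposes the same procedure as a Future):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Blocks until the master finishes the CREATE_TABLE_* states seen above;
          // the client polls "is procedure done" much like the pid=4 checks in this log.
          admin.createTable(TableDescriptorBuilder.newBuilder(name)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
              .build());
        }
      }
    }
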
2024-11-16T19:26:20,775 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T19:26:20,778 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731785180775"}]},"ts":"1731785180775"} 2024-11-16T19:26:20,782 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-16T19:26:20,784 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=c55727d527c475aa76261629fa04dca7, ASSIGN}] 2024-11-16T19:26:20,786 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=c55727d527c475aa76261629fa04dca7, ASSIGN 2024-11-16T19:26:20,788 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=c55727d527c475aa76261629fa04dca7, ASSIGN; state=OFFLINE, location=d11ab77873cb,34913,1731785178695; forceNewPlan=false, retain=false 2024-11-16T19:26:20,941 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c55727d527c475aa76261629fa04dca7, regionState=OPENING, regionLocation=d11ab77873cb,34913,1731785178695 2024-11-16T19:26:20,948 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=c55727d527c475aa76261629fa04dca7, ASSIGN because future has completed 2024-11-16T19:26:20,950 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c55727d527c475aa76261629fa04dca7, server=d11ab77873cb,34913,1731785178695}] 2024-11-16T19:26:21,117 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7. 
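Above, TransitRegionStateProcedure (pid=5) and its OpenRegionProcedure child (pid=6) move the new region from OFFLINE to OPENING on the only available regionserver. Once the region is open, a client can observe the placement through RegionLocator, which is roughly what the later "fetched location ... seqNum=2" line reflects. A small sketch, not taken from the test itself:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionLocationSketch {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(name)) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // For this single-region table the output would name the lone regionserver,
            // e.g. d11ab77873cb,34913,1731785178695 in the log above.
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }
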
2024-11-16T19:26:21,117 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c55727d527c475aa76261629fa04dca7, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7.', STARTKEY => '', ENDKEY => ''} 2024-11-16T19:26:21,118 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling c55727d527c475aa76261629fa04dca7 2024-11-16T19:26:21,118 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:26:21,118 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c55727d527c475aa76261629fa04dca7 2024-11-16T19:26:21,118 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c55727d527c475aa76261629fa04dca7 2024-11-16T19:26:21,121 INFO [StoreOpener-c55727d527c475aa76261629fa04dca7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c55727d527c475aa76261629fa04dca7 2024-11-16T19:26:21,124 INFO [StoreOpener-c55727d527c475aa76261629fa04dca7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c55727d527c475aa76261629fa04dca7 columnFamilyName info 2024-11-16T19:26:21,124 DEBUG [StoreOpener-c55727d527c475aa76261629fa04dca7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:26:21,126 INFO [StoreOpener-c55727d527c475aa76261629fa04dca7-1 {}] regionserver.HStore(327): Store=c55727d527c475aa76261629fa04dca7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:26:21,126 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c55727d527c475aa76261629fa04dca7 2024-11-16T19:26:21,127 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7 2024-11-16T19:26:21,128 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7 2024-11-16T19:26:21,129 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c55727d527c475aa76261629fa04dca7 2024-11-16T19:26:21,129 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c55727d527c475aa76261629fa04dca7 2024-11-16T19:26:21,132 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c55727d527c475aa76261629fa04dca7 2024-11-16T19:26:21,135 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T19:26:21,136 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c55727d527c475aa76261629fa04dca7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=713677, jitterRate=-0.09251390397548676}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T19:26:21,136 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c55727d527c475aa76261629fa04dca7 2024-11-16T19:26:21,137 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c55727d527c475aa76261629fa04dca7: Running coprocessor pre-open hook at 1731785181119Writing region info on filesystem at 1731785181119Initializing all the Stores at 1731785181121 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785181121Cleaning up temporary data from old regions at 1731785181129 (+8 ms)Running coprocessor post-open hooks at 1731785181136 (+7 ms)Region opened successfully at 1731785181137 (+1 ms) 2024-11-16T19:26:21,139 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7., pid=6, masterSystemTime=1731785181105 2024-11-16T19:26:21,143 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7. 2024-11-16T19:26:21,143 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7. 2024-11-16T19:26:21,144 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c55727d527c475aa76261629fa04dca7, regionState=OPEN, openSeqNum=2, regionLocation=d11ab77873cb,34913,1731785178695 2024-11-16T19:26:21,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c55727d527c475aa76261629fa04dca7, server=d11ab77873cb,34913,1731785178695 because future has completed 2024-11-16T19:26:21,154 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T19:26:21,155 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c55727d527c475aa76261629fa04dca7, server=d11ab77873cb,34913,1731785178695 in 201 msec 2024-11-16T19:26:21,159 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T19:26:21,159 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=c55727d527c475aa76261629fa04dca7, ASSIGN in 370 msec 2024-11-16T19:26:21,161 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T19:26:21,161 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731785181161"}]},"ts":"1731785181161"} 2024-11-16T19:26:21,165 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-16T19:26:21,167 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T19:26:21,170 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 482 msec 2024-11-16T19:26:25,853 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-16T19:26:25,921 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T19:26:25,924 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-16T19:26:28,455 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T19:26:28,455 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-16T19:26:28,457 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-16T19:26:28,457 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-16T19:26:28,459 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T19:26:28,459 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-16T19:26:28,460 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-16T19:26:28,460 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-16T19:26:30,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41597 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T19:26:30,741 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-16T19:26:30,743 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-16T19:26:30,751 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-16T19:26:30,752 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7. 
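Between the "Found 1 regions" check above and the flush at 19:26:42 below, the test writes rows with roughly 1 KB values (the flush later reports "key is row0001/info:" and ~7.36 KB across 7 entries). The driver code itself is not part of this log; the following is a hedged approximation using the standard Table/Put API, with the "q" qualifier and value size chosen for illustration.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowWriterSketch {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        byte[] family = Bytes.toBytes("info");
        byte[] value = new byte[1024];  // ~1 KB per cell, in the spirit of the flushed size above
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(name)) {
          for (int i = 1; i <= 7; i++) {
            // Row keys like "row0001" match the key printed by the flush below.
            Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
            put.addColumn(family, Bytes.toBytes("q"), value);
            table.put(put);
          }
        }
      }
    }
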
2024-11-16T19:26:30,753 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C34913%2C1731785178695.1731785190753 2024-11-16T19:26:30,771 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:26:30,772 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:26:30,772 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:26:30,772 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:26:30,772 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:26:30,773 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785179859 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785190753 2024-11-16T19:26:30,775 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41019:41019),(127.0.0.1/127.0.0.1:41507:41507)] 2024-11-16T19:26:30,775 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785179859 is not closed yet, will try archiving it next time 2024-11-16T19:26:30,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741833_1009 (size=451) 2024-11-16T19:26:30,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741833_1009 (size=451) 2024-11-16T19:26:30,779 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785179859 to hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/oldWALs/d11ab77873cb%2C34913%2C1731785178695.1731785179859 2024-11-16T19:26:30,786 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7., hostname=d11ab77873cb,34913,1731785178695, seqNum=2] 2024-11-16T19:26:42,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34913 {}] regionserver.HRegion(8855): Flush requested on c55727d527c475aa76261629fa04dca7 2024-11-16T19:26:42,830 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c55727d527c475aa76261629fa04dca7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T19:26:42,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/.tmp/info/70012d9f5d4b4bc7b891acdfc58d8a94 is 1080, key is row0001/info:/1731785190789/Put/seqid=0 2024-11-16T19:26:42,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741838_1014 (size=12509) 2024-11-16T19:26:42,910 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741838_1014 (size=12509) 2024-11-16T19:26:42,911 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/.tmp/info/70012d9f5d4b4bc7b891acdfc58d8a94 2024-11-16T19:26:42,954 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/.tmp/info/70012d9f5d4b4bc7b891acdfc58d8a94 as hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/70012d9f5d4b4bc7b891acdfc58d8a94 2024-11-16T19:26:42,966 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/70012d9f5d4b4bc7b891acdfc58d8a94, entries=7, sequenceid=11, filesize=12.2 K 2024-11-16T19:26:42,972 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for c55727d527c475aa76261629fa04dca7 in 144ms, sequenceid=11, compaction requested=false 2024-11-16T19:26:42,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c55727d527c475aa76261629fa04dca7: 2024-11-16T19:26:47,267 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
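The flush sequence above reports its sizes and timing in a fixed wording ("Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304 ... in 144ms, sequenceid=11"). When skimming many such entries it can help to pull them out mechanically; the throwaway helper below does that with a regex tied to exactly that wording as it appears in this log. The class name and pattern are mine, not anything shipped with HBase, and the program takes the log file path as its only argument.

```java
import java.io.BufferedReader;
import java.io.FileReader;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class FlushLogScanner {
  // Matches "Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304,
  // currentSize=0 B/0 for <region> in 144ms, sequenceid=11 ..." lines as printed above.
  private static final Pattern FLUSH = Pattern.compile(
      "Finished flush of dataSize ~([\\d.]+ [KMG]?B)/(\\d+), heapSize ~([\\d.]+ [KMG]?B)/(\\d+),"
          + " currentSize=[^ ]+ [^ ]+ for (\\w+) in (\\d+)ms, sequenceid=(\\d+)");

  public static void main(String[] args) throws Exception {
    try (BufferedReader in = new BufferedReader(new FileReader(args[0]))) {
      String line;
      while ((line = in.readLine()) != null) {
        Matcher m = FLUSH.matcher(line);
        if (m.find()) {
          System.out.printf("region=%s flushed %s (%s bytes) in %s ms, seqid=%s%n",
              m.group(5), m.group(1), m.group(2), m.group(6), m.group(7));
        }
      }
    }
  }
}
```

Running it as `java FlushLogScanner test.log` prints one summary line per flush, which makes the later 441 ms and 5061 ms flushes in this run easy to spot.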
2024-11-16T19:26:50,843 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C34913%2C1731785178695.1731785210842 2024-11-16T19:26:51,053 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 207 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK], DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK]] 2024-11-16T19:26:51,053 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:26:51,053 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:26:51,053 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:26:51,054 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:26:51,054 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:26:51,054 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785190753 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785210842 2024-11-16T19:26:51,056 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41019:41019),(127.0.0.1/127.0.0.1:41507:41507)] 2024-11-16T19:26:51,056 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785190753 is not closed yet, will try archiving it next time 2024-11-16T19:26:51,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741837_1013 (size=12399) 2024-11-16T19:26:51,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741837_1013 (size=12399) 2024-11-16T19:26:51,260 INFO [FSHLog-0-hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a-prefix:d11ab77873cb,34913,1731785178695 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK], DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK]] 2024-11-16T19:26:53,468 INFO [FSHLog-0-hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a-prefix:d11ab77873cb,34913,1731785178695 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK], DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK]] 2024-11-16T19:26:55,676 INFO [FSHLog-0-hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a-prefix:d11ab77873cb,34913,1731785178695 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK], DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK]] 2024-11-16T19:26:57,881 INFO [FSHLog-0-hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a-prefix:d11ab77873cb,34913,1731785178695 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK], DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK]] 2024-11-16T19:26:57,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34913 {}] regionserver.HRegion(8855): Flush requested on c55727d527c475aa76261629fa04dca7 2024-11-16T19:26:57,881 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c55727d527c475aa76261629fa04dca7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T19:26:58,083 INFO [FSHLog-0-hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a-prefix:d11ab77873cb,34913,1731785178695 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK], DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK]] 2024-11-16T19:26:58,088 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/.tmp/info/ef8dda23d8ef4660813f39b5580b8f41 is 1080, key is row0008/info:/1731785204827/Put/seqid=0 2024-11-16T19:26:58,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741840_1016 (size=12509) 2024-11-16T19:26:58,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741840_1016 (size=12509) 2024-11-16T19:26:58,098 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/.tmp/info/ef8dda23d8ef4660813f39b5580b8f41 2024-11-16T19:26:58,108 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/.tmp/info/ef8dda23d8ef4660813f39b5580b8f41 as hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/ef8dda23d8ef4660813f39b5580b8f41 2024-11-16T19:26:58,119 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/ef8dda23d8ef4660813f39b5580b8f41, entries=7, sequenceid=21, filesize=12.2 K 2024-11-16T19:26:58,322 INFO [FSHLog-0-hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a-prefix:d11ab77873cb,34913,1731785178695 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK], DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK]] 2024-11-16T19:26:58,322 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for c55727d527c475aa76261629fa04dca7 in 
441ms, sequenceid=21, compaction requested=false 2024-11-16T19:26:58,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c55727d527c475aa76261629fa04dca7: 2024-11-16T19:26:58,323 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-16T19:26:58,323 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:26:58,324 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/70012d9f5d4b4bc7b891acdfc58d8a94 because midkey is the same as first or last row 2024-11-16T19:27:00,085 INFO [FSHLog-0-hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a-prefix:d11ab77873cb,34913,1731785178695 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK], DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK]] 2024-11-16T19:27:00,565 INFO [master/d11ab77873cb:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T19:27:00,565 INFO [master/d11ab77873cb:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-16T19:27:02,289 INFO [FSHLog-0-hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a-prefix:d11ab77873cb,34913,1731785178695 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK], DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK]] 2024-11-16T19:27:02,293 WARN [FSHLog-0-hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a-prefix:d11ab77873cb,34913,1731785178695 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK], DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK]] 2024-11-16T19:27:02,295 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d11ab77873cb%2C34913%2C1731785178695:(num 1731785210842) roll requested 2024-11-16T19:27:02,296 INFO [regionserver/d11ab77873cb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C34913%2C1731785178695.1731785222295 2024-11-16T19:27:02,507 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 208 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK], DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK]] 2024-11-16T19:27:02,508 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:02,508 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:02,508 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:02,509 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:02,509 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
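The WARN above ("count=8, threshold=5") and the later one at 19:27:18 ("time=5007 ms, threshold=5000 ms") show the two ways this test ends up requesting a roll: too many moderately slow syncs accumulate within an observation window, or a single sync blows past a hard cap. A rough sketch of that bookkeeping is below. The class, method and field names are hypothetical, and the thresholds are simply the values printed in this log; this is not the actual FSHLog/AbstractFSWAL implementation.

```java
import java.util.ArrayDeque;
import java.util.Deque;

/**
 * Illustrative sketch only: tracks "slow" WAL syncs and decides when a log roll
 * should be requested, mirroring the two WARN messages seen in the log above.
 */
public class SlowSyncRollTracker {
  private final long slowSyncMillis;        // a sync slower than this counts as "slow"
  private final long rollImmediatelyMillis; // a single sync slower than this forces a roll
  private final int slowSyncCountLimit;     // slow syncs in the window that force a roll
  private final long windowMillis;          // how long a slow sync stays "remembered"
  private final Deque<Long> slowSyncTimestamps = new ArrayDeque<>();

  public SlowSyncRollTracker(long slowSyncMillis, long rollImmediatelyMillis,
      int slowSyncCountLimit, long windowMillis) {
    this.slowSyncMillis = slowSyncMillis;
    this.rollImmediatelyMillis = rollImmediatelyMillis;
    this.slowSyncCountLimit = slowSyncCountLimit;
    this.windowMillis = windowMillis;
  }

  /** Record one completed sync; returns true if a log roll should be requested. */
  public synchronized boolean onSyncCompleted(long syncCostMillis, long nowMillis) {
    if (syncCostMillis >= rollImmediatelyMillis) {
      // Mirrors "Requesting log roll because we exceeded slow sync threshold; time=5007 ms"
      return true;
    }
    if (syncCostMillis >= slowSyncMillis) {
      slowSyncTimestamps.addLast(nowMillis);
    }
    // Drop slow syncs that fell out of the observation window.
    while (!slowSyncTimestamps.isEmpty()
        && nowMillis - slowSyncTimestamps.peekFirst() > windowMillis) {
      slowSyncTimestamps.removeFirst();
    }
    // Mirrors "... we exceeded slow sync threshold; count=8, threshold=5"
    return slowSyncTimestamps.size() > slowSyncCountLimit;
  }
}
```

With whatever per-sync threshold the test is configured with, the repeated ~200 ms "Slow sync cost" entries above accumulate until the count passes 5, which is the count=8 roll request; the 5007 ms sync later trips the hard cap directly.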
2024-11-16T19:27:02,509 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785210842 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785222295 2024-11-16T19:27:02,511 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41507:41507),(127.0.0.1/127.0.0.1:41019:41019)] 2024-11-16T19:27:02,511 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785210842 is not closed yet, will try archiving it next time 2024-11-16T19:27:02,511 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785190753 to hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/oldWALs/d11ab77873cb%2C34913%2C1731785178695.1731785190753 2024-11-16T19:27:02,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741839_1015 (size=7739) 2024-11-16T19:27:02,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741839_1015 (size=7739) 2024-11-16T19:27:04,494 INFO [FSHLog-0-hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a-prefix:d11ab77873cb,34913,1731785178695 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK], DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK]] 2024-11-16T19:27:06,118 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region c55727d527c475aa76261629fa04dca7, had cached 0 bytes from a total of 25018 2024-11-16T19:27:06,698 INFO [FSHLog-0-hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a-prefix:d11ab77873cb,34913,1731785178695 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK], DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK]] 2024-11-16T19:27:08,902 INFO [FSHLog-0-hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a-prefix:d11ab77873cb,34913,1731785178695 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK], DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK]] 2024-11-16T19:27:11,108 INFO [FSHLog-0-hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a-prefix:d11ab77873cb,34913,1731785178695 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK], 
DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK]] 2024-11-16T19:27:13,111 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T19:27:13,111 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C34913%2C1731785178695.1731785233111 2024-11-16T19:27:17,267 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T19:27:18,122 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5007 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK], DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK]] 2024-11-16T19:27:18,124 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5007 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK], DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK]] 2024-11-16T19:27:18,124 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d11ab77873cb%2C34913%2C1731785178695:(num 1731785233111) roll requested 2024-11-16T19:27:18,124 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:18,124 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:18,125 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:18,125 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:18,125 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:18,125 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785222295 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785233111 2024-11-16T19:27:18,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741841_1017 (size=4753) 2024-11-16T19:27:18,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741841_1017 (size=4753) 2024-11-16T19:27:18,133 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41507:41507),(127.0.0.1/127.0.0.1:41019:41019)] 2024-11-16T19:27:18,133 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785222295 is not closed yet, will try archiving it next time 2024-11-16T19:27:18,134 INFO [regionserver/d11ab77873cb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C34913%2C1731785178695.1731785238133 2024-11-16T19:27:23,137 INFO [FSHLog-0-hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a-prefix:d11ab77873cb,34913,1731785178695 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK], DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK]] 2024-11-16T19:27:23,137 WARN [FSHLog-0-hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a-prefix:d11ab77873cb,34913,1731785178695 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK], DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK]] 2024-11-16T19:27:23,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34913 {}] regionserver.HRegion(8855): Flush requested on c55727d527c475aa76261629fa04dca7 2024-11-16T19:27:23,137 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c55727d527c475aa76261629fa04dca7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T19:27:23,144 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5008 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK], DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK]] 2024-11-16T19:27:23,144 WARN [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5008 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK], DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK]] 2024-11-16T19:27:25,138 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T19:27:28,140 INFO [FSHLog-0-hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a-prefix:d11ab77873cb,34913,1731785178695 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK], DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK]] 2024-11-16T19:27:28,140 WARN [FSHLog-0-hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a-prefix:d11ab77873cb,34913,1731785178695 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39641,DS-81e019ed-0d69-4373-add4-83a5cd5128da,DISK], DatanodeInfoWithStorage[127.0.0.1:41287,DS-8c3d13ef-75aa-43d5-b271-3f7a588a8e62,DISK]] 2024-11-16T19:27:28,140 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:28,140 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:28,140 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:28,140 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:28,141 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:28,141 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785233111 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785238133 2024-11-16T19:27:28,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741842_1018 (size=1569) 2024-11-16T19:27:28,144 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41019:41019),(127.0.0.1/127.0.0.1:41507:41507)] 2024-11-16T19:27:28,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741842_1018 (size=1569) 2024-11-16T19:27:28,144 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785233111 is not closed yet, will try archiving it next time 2024-11-16T19:27:28,144 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d11ab77873cb%2C34913%2C1731785178695:(num 1731785238133) roll requested 2024-11-16T19:27:28,144 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C34913%2C1731785178695.1731785248144 2024-11-16T19:27:28,146 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/.tmp/info/fb4ab094f1004da0b35151b35ae4cdc6 is 1080, key is row0015/info:/1731785219883/Put/seqid=0 2024-11-16T19:27:28,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741844_1020 (size=12509) 2024-11-16T19:27:28,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741844_1020 (size=12509) 2024-11-16T19:27:28,161 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/.tmp/info/fb4ab094f1004da0b35151b35ae4cdc6 2024-11-16T19:27:28,162 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:28,163 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:28,163 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:28,163 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:28,163 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:28,163 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785238133 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785248144 2024-11-16T19:27:28,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741843_1019 (size=93) 2024-11-16T19:27:28,167 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741843_1019 (size=93) 2024-11-16T19:27:28,180 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41507:41507),(127.0.0.1/127.0.0.1:41019:41019)] 2024-11-16T19:27:28,180 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785238133 is not closed yet, will try archiving it next time 2024-11-16T19:27:28,180 INFO [regionserver/d11ab77873cb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C34913%2C1731785178695.1731785248180 2024-11-16T19:27:28,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/.tmp/info/fb4ab094f1004da0b35151b35ae4cdc6 as hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/fb4ab094f1004da0b35151b35ae4cdc6 2024-11-16T19:27:28,196 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/fb4ab094f1004da0b35151b35ae4cdc6, entries=7, sequenceid=31, filesize=12.2 K 2024-11-16T19:27:28,198 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=1.05 KB/1076 for c55727d527c475aa76261629fa04dca7 in 5061ms, sequenceid=31, compaction requested=true 2024-11-16T19:27:28,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c55727d527c475aa76261629fa04dca7: 2024-11-16T19:27:28,199 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-16T19:27:28,199 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:27:28,199 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/70012d9f5d4b4bc7b891acdfc58d8a94 because midkey is the same as first or last row 2024-11-16T19:27:28,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c55727d527c475aa76261629fa04dca7:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T19:27:28,205 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:28,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:27:28,206 DEBUG [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T19:27:28,206 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:28,206 INFO [sync.2 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:28,206 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:28,206 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:28,206 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785248144 with entries=2, filesize=1.56 KB; new WAL /user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785248180 2024-11-16T19:27:28,209 DEBUG [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T19:27:28,210 DEBUG [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] regionserver.HStore(1541): c55727d527c475aa76261629fa04dca7/info is initiating minor compaction (all files) 2024-11-16T19:27:28,211 INFO [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c55727d527c475aa76261629fa04dca7/info in TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7. 2024-11-16T19:27:28,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741845_1021 (size=1603) 2024-11-16T19:27:28,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741845_1021 (size=1603) 2024-11-16T19:27:28,211 INFO [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/70012d9f5d4b4bc7b891acdfc58d8a94, hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/ef8dda23d8ef4660813f39b5580b8f41, hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/fb4ab094f1004da0b35151b35ae4cdc6] into tmpdir=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/.tmp, totalSize=36.6 K 2024-11-16T19:27:28,213 DEBUG [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] compactions.Compactor(225): Compacting 70012d9f5d4b4bc7b891acdfc58d8a94, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731785190789 2024-11-16T19:27:28,213 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785238133 is not closed yet, will try archiving it next time 2024-11-16T19:27:28,213 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785210842 to 
hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/oldWALs/d11ab77873cb%2C34913%2C1731785178695.1731785210842 2024-11-16T19:27:28,214 DEBUG [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] compactions.Compactor(225): Compacting ef8dda23d8ef4660813f39b5580b8f41, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731785204827 2024-11-16T19:27:28,214 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785222295 to hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/oldWALs/d11ab77873cb%2C34913%2C1731785178695.1731785222295 2024-11-16T19:27:28,214 DEBUG [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] compactions.Compactor(225): Compacting fb4ab094f1004da0b35151b35ae4cdc6, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731785219883 2024-11-16T19:27:28,216 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785233111 to hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/oldWALs/d11ab77873cb%2C34913%2C1731785178695.1731785233111 2024-11-16T19:27:28,217 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41507:41507),(127.0.0.1/127.0.0.1:41019:41019)] 2024-11-16T19:27:28,217 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785238133 is not closed yet, will try archiving it next time 2024-11-16T19:27:28,248 INFO [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c55727d527c475aa76261629fa04dca7#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T19:27:28,249 DEBUG [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/.tmp/info/730001c3452f4104bb61ab4bdb207380 is 1080, key is row0001/info:/1731785190789/Put/seqid=0 2024-11-16T19:27:28,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741847_1023 (size=27710) 2024-11-16T19:27:28,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741847_1023 (size=27710) 2024-11-16T19:27:28,266 DEBUG [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/.tmp/info/730001c3452f4104bb61ab4bdb207380 as hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/730001c3452f4104bb61ab4bdb207380 2024-11-16T19:27:28,282 INFO [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c55727d527c475aa76261629fa04dca7/info of c55727d527c475aa76261629fa04dca7 into 730001c3452f4104bb61ab4bdb207380(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T19:27:28,282 DEBUG [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c55727d527c475aa76261629fa04dca7: 2024-11-16T19:27:28,284 INFO [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7., storeName=c55727d527c475aa76261629fa04dca7/info, priority=13, startTime=1731785248200; duration=0sec 2024-11-16T19:27:28,284 DEBUG [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-16T19:27:28,284 DEBUG [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:27:28,284 DEBUG [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/730001c3452f4104bb61ab4bdb207380 because midkey is the same as first or last row 2024-11-16T19:27:28,284 DEBUG [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-16T19:27:28,284 DEBUG [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:27:28,284 DEBUG [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/730001c3452f4104bb61ab4bdb207380 because midkey is the same as first or last row 2024-11-16T19:27:28,284 DEBUG [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-16T19:27:28,285 DEBUG [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:27:28,285 DEBUG [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/730001c3452f4104bb61ab4bdb207380 because midkey is the same as first or last row 2024-11-16T19:27:28,285 DEBUG [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:27:28,285 DEBUG [RS:0;d11ab77873cb:34913-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c55727d527c475aa76261629fa04dca7:info 2024-11-16T19:27:28,570 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/WALs/d11ab77873cb,34913,1731785178695/d11ab77873cb%2C34913%2C1731785178695.1731785238133 to hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/oldWALs/d11ab77873cb%2C34913%2C1731785178695.1731785238133 2024-11-16T19:27:40,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34913 {}] regionserver.HRegion(8855): Flush requested on c55727d527c475aa76261629fa04dca7 2024-11-16T19:27:40,218 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c55727d527c475aa76261629fa04dca7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T19:27:40,225 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/.tmp/info/2a12c063c5d74249b35acb1589c61f2f is 1080, key is row0022/info:/1731785248182/Put/seqid=0 2024-11-16T19:27:40,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741848_1024 (size=12509) 2024-11-16T19:27:40,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741848_1024 (size=12509) 2024-11-16T19:27:40,233 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/.tmp/info/2a12c063c5d74249b35acb1589c61f2f 2024-11-16T19:27:40,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/.tmp/info/2a12c063c5d74249b35acb1589c61f2f as hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/2a12c063c5d74249b35acb1589c61f2f 2024-11-16T19:27:40,252 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/2a12c063c5d74249b35acb1589c61f2f, entries=7, sequenceid=42, filesize=12.2 K 2024-11-16T19:27:40,254 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for c55727d527c475aa76261629fa04dca7 in 35ms, sequenceid=42, compaction requested=false 2024-11-16T19:27:40,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c55727d527c475aa76261629fa04dca7: 2024-11-16T19:27:40,254 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-16T19:27:40,254 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:27:40,254 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/730001c3452f4104bb61ab4bdb207380 because midkey is the same as first or last row 2024-11-16T19:27:47,267 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T19:27:48,230 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T19:27:48,231 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
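The DEBUG entries a few lines up repeat the split-policy arithmetic seen throughout this run: the store total (here 39.3 K) exceeds sizeToCheck=16.0 K, yet the split is refused because the candidate midkey equals the region's first or last row. A compact sketch of that decision follows, with hypothetical keys and names; it is not the real ConstantSizeRegionSplitPolicy/StoreUtils code.

```java
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

/**
 * Illustrative sketch of the split decision reported in the log: split when the
 * store's total file size exceeds the size-to-check, but refuse when the chosen
 * midkey equals the region's first or last row.
 */
public class SplitDecisionSketch {

  // "Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K"
  static boolean sizeSaysSplit(long sumStoreFileSizeBytes, long sizeToCheckBytes) {
    return sumStoreFileSizeBytes > sizeToCheckBytes;
  }

  // "cannot split ... because midkey is the same as first or last row"
  static boolean midkeyAllowsSplit(byte[] midkey, byte[] firstRowKey, byte[] lastRowKey) {
    return !Arrays.equals(midkey, firstRowKey) && !Arrays.equals(midkey, lastRowKey);
  }

  public static void main(String[] args) {
    long sumSize = 40_243;      // roughly the 39.3 K reported above
    long sizeToCheck = 16_384;  // the 16.0 K threshold this test runs with
    byte[] first = "rowA".getBytes(StandardCharsets.UTF_8);   // example keys only
    byte[] last = "rowZ".getBytes(StandardCharsets.UTF_8);
    byte[] midkey = "rowA".getBytes(StandardCharsets.UTF_8);  // midkey landing on the first row

    System.out.println("size says split:     " + sizeSaysSplit(sumSize, sizeToCheck));    // true
    System.out.println("midkey allows split: " + midkeyAllowsSplit(midkey, first, last)); // false
  }
}
```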
2024-11-16T19:27:48,232 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T19:27:48,242 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:27:48,243 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:27:48,243 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
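The call stack above comes from the test's tearDown closing the shared connection via HBaseTestingUtil.shutdownMiniCluster. Assuming the usual JUnit 4 start/shutdown pairing (the setUp of AbstractTestLogRolling is not shown in this excerpt), the lifecycle looks roughly like this sketch:

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;

// Hedged sketch of the mini-cluster lifecycle implied by the stack trace above;
// the real test class configures WAL and flush sizes before starting the cluster.
public class MiniClusterLifecycleSketch {
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    TEST_UTIL.startMiniCluster(); // master, region server and a mini DFS, as seen in this log
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster(); // closes connections, stops HBase, then the mini DFS
  }
}
```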
2024-11-16T19:27:48,243 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T19:27:48,243 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=118547021, stopped=false 2024-11-16T19:27:48,243 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d11ab77873cb,41597,1731785178126 2024-11-16T19:27:48,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T19:27:48,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34913-0x1004a0015560001, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T19:27:48,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:48,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34913-0x1004a0015560001, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:48,245 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T19:27:48,245 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T19:27:48,245 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T19:27:48,245 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:27:48,245 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34913-0x1004a0015560001, quorum=127.0.0.1:55736, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:27:48,245 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:27:48,245 INFO [RS:0;d11ab77873cb:34913 {}] regionserver.HRegionServer(878): Closing user regions 2024-11-16T19:27:48,246 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd11ab77873cb,34913,1731785178695' ***** 2024-11-16T19:27:48,246 INFO [RS:0;d11ab77873cb:34913 {}] regionserver.HRegionServer(3091): Received CLOSE for c55727d527c475aa76261629fa04dca7 2024-11-16T19:27:48,246 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T19:27:48,247 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c55727d527c475aa76261629fa04dca7, disabling compactions & flushes 2024-11-16T19:27:48,247 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7. 2024-11-16T19:27:48,247 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7. 2024-11-16T19:27:48,247 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7. 
after waiting 0 ms 2024-11-16T19:27:48,247 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7. 2024-11-16T19:27:48,247 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing c55727d527c475aa76261629fa04dca7 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-16T19:27:48,248 INFO [RS:0;d11ab77873cb:34913 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T19:27:48,248 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T19:27:48,248 INFO [RS:0;d11ab77873cb:34913 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T19:27:48,248 INFO [RS:0;d11ab77873cb:34913 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T19:27:48,248 INFO [RS:0;d11ab77873cb:34913 {}] regionserver.HRegionServer(959): stopping server d11ab77873cb,34913,1731785178695 2024-11-16T19:27:48,249 INFO [RS:0;d11ab77873cb:34913 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T19:27:48,249 INFO [RS:0;d11ab77873cb:34913 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d11ab77873cb:34913. 2024-11-16T19:27:48,249 DEBUG [RS:0;d11ab77873cb:34913 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T19:27:48,249 DEBUG [RS:0;d11ab77873cb:34913 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:27:48,249 INFO [RS:0;d11ab77873cb:34913 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T19:27:48,249 INFO [RS:0;d11ab77873cb:34913 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T19:27:48,249 INFO [RS:0;d11ab77873cb:34913 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-16T19:27:48,249 INFO [RS:0;d11ab77873cb:34913 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T19:27:48,250 INFO [RS:0;d11ab77873cb:34913 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T19:27:48,250 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T19:27:48,250 DEBUG [RS:0;d11ab77873cb:34913 {}] regionserver.HRegionServer(1325): Online Regions={c55727d527c475aa76261629fa04dca7=TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7., 1588230740=hbase:meta,,1.1588230740} 2024-11-16T19:27:48,250 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T19:27:48,250 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T19:27:48,250 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T19:27:48,250 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T19:27:48,250 DEBUG [RS:0;d11ab77873cb:34913 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, c55727d527c475aa76261629fa04dca7 2024-11-16T19:27:48,250 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-16T19:27:48,254 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/.tmp/info/ec1fe58440104d5eb79a1639c1ca2ce9 is 1080, key is row0029/info:/1731785262220/Put/seqid=0 2024-11-16T19:27:48,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741849_1025 (size=8193) 2024-11-16T19:27:48,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741849_1025 (size=8193) 2024-11-16T19:27:48,264 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/.tmp/info/ec1fe58440104d5eb79a1639c1ca2ce9 2024-11-16T19:27:48,272 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/hbase/meta/1588230740/.tmp/info/a965c1b4b186474bb79fd4ccdf9c5405 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7./info:regioninfo/1731785181144/Put/seqid=0 2024-11-16T19:27:48,275 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/.tmp/info/ec1fe58440104d5eb79a1639c1ca2ce9 as hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/ec1fe58440104d5eb79a1639c1ca2ce9 2024-11-16T19:27:48,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741850_1026 (size=7016) 2024-11-16T19:27:48,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741850_1026 (size=7016) 2024-11-16T19:27:48,279 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/hbase/meta/1588230740/.tmp/info/a965c1b4b186474bb79fd4ccdf9c5405 2024-11-16T19:27:48,284 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/ec1fe58440104d5eb79a1639c1ca2ce9, entries=3, sequenceid=48, filesize=8.0 K 2024-11-16T19:27:48,286 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for c55727d527c475aa76261629fa04dca7 in 39ms, sequenceid=48, compaction requested=true 2024-11-16T19:27:48,287 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/70012d9f5d4b4bc7b891acdfc58d8a94, hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/ef8dda23d8ef4660813f39b5580b8f41, hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/fb4ab094f1004da0b35151b35ae4cdc6] to archive 2024-11-16T19:27:48,289 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
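The entries above show the memstore flush path for region c55727d527c475aa76261629fa04dca7: the flusher writes the new HFile under the store's .tmp directory and then commits it into info/. A minimal client-side sketch of requesting such a flush through the standard Admin API; the configuration values are assumptions (the test above uses its own embedded quorum), only the table name is taken from the log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed quorum address for illustration only.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the region servers hosting this table to flush their memstores;
      // each flush produces a .tmp HFile that is then committed into the
      // column-family directory, as in the log entries above.
      admin.flush(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"));
    }
  }
}
```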
2024-11-16T19:27:48,292 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/70012d9f5d4b4bc7b891acdfc58d8a94 to hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/archive/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/70012d9f5d4b4bc7b891acdfc58d8a94 2024-11-16T19:27:48,295 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/ef8dda23d8ef4660813f39b5580b8f41 to hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/archive/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/ef8dda23d8ef4660813f39b5580b8f41 2024-11-16T19:27:48,297 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/fb4ab094f1004da0b35151b35ae4cdc6 to hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/archive/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/info/fb4ab094f1004da0b35151b35ae4cdc6 2024-11-16T19:27:48,304 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/hbase/meta/1588230740/.tmp/ns/fa601e63a6894f5c9763a60a2008f5d5 is 43, key is default/ns:d/1731785180469/Put/seqid=0 2024-11-16T19:27:48,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741851_1027 (size=5153) 2024-11-16T19:27:48,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741851_1027 (size=5153) 2024-11-16T19:27:48,311 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/hbase/meta/1588230740/.tmp/ns/fa601e63a6894f5c9763a60a2008f5d5 2024-11-16T19:27:48,307 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=d11ab77873cb:41597 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-16T19:27:48,312 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [70012d9f5d4b4bc7b891acdfc58d8a94=12509, ef8dda23d8ef4660813f39b5580b8f41=12509, fb4ab094f1004da0b35151b35ae4cdc6=12509] 2024-11-16T19:27:48,318 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/default/TestLogRolling-testSlowSyncLogRolling/c55727d527c475aa76261629fa04dca7/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-16T19:27:48,320 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7. 2024-11-16T19:27:48,320 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c55727d527c475aa76261629fa04dca7: Waiting for close lock at 1731785268246Running coprocessor pre-close hooks at 1731785268247 (+1 ms)Disabling compacts and flushes for region at 1731785268247Disabling writes for close at 1731785268247Obtaining lock to block concurrent updates at 1731785268247Preparing flush snapshotting stores in c55727d527c475aa76261629fa04dca7 at 1731785268247Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731785268248 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7. at 1731785268249 (+1 ms)Flushing c55727d527c475aa76261629fa04dca7/info: creating writer at 1731785268249Flushing c55727d527c475aa76261629fa04dca7/info: appending metadata at 1731785268253 (+4 ms)Flushing c55727d527c475aa76261629fa04dca7/info: closing flushed file at 1731785268253Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@747348d7: reopening flushed file at 1731785268274 (+21 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for c55727d527c475aa76261629fa04dca7 in 39ms, sequenceid=48, compaction requested=true at 1731785268286 (+12 ms)Writing region close event to WAL at 1731785268314 (+28 ms)Running coprocessor post-close hooks at 1731785268319 (+5 ms)Closed at 1731785268320 (+1 ms) 2024-11-16T19:27:48,321 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731785180676.c55727d527c475aa76261629fa04dca7. 
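The "Region close journal" entry above records each close step with an absolute timestamp and a (+N ms) delta. When reading runs like this one, a small helper can pull those deltas out; the sketch below is a hypothetical log-analysis snippet, with the journal format taken from the line above and everything else assumed:

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class CloseJournalDeltas {
  // Matches "<step description> at <epoch millis>", optionally followed by "(+N ms)".
  private static final Pattern STEP =
      Pattern.compile("([A-Z][^:]*?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

  public static void main(String[] args) {
    // Shortened sample in the same format as the journal entry above.
    String journal = "Waiting for close lock at 1731785268246"
        + "Running coprocessor pre-close hooks at 1731785268247 (+1 ms)"
        + "Disabling compacts and flushes for region at 1731785268247";
    Matcher m = STEP.matcher(journal);
    while (m.find()) {
      String delta = m.group(3) == null ? "0" : m.group(3);
      System.out.println(m.group(1).trim() + " -> +" + delta + " ms");
    }
  }
}
```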
2024-11-16T19:27:48,336 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/hbase/meta/1588230740/.tmp/table/d13bfed31ced4a6dbb3c91d0f9656b65 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731785181161/Put/seqid=0 2024-11-16T19:27:48,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741852_1028 (size=5396) 2024-11-16T19:27:48,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741852_1028 (size=5396) 2024-11-16T19:27:48,343 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/hbase/meta/1588230740/.tmp/table/d13bfed31ced4a6dbb3c91d0f9656b65 2024-11-16T19:27:48,354 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/hbase/meta/1588230740/.tmp/info/a965c1b4b186474bb79fd4ccdf9c5405 as hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/hbase/meta/1588230740/info/a965c1b4b186474bb79fd4ccdf9c5405 2024-11-16T19:27:48,364 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/hbase/meta/1588230740/info/a965c1b4b186474bb79fd4ccdf9c5405, entries=10, sequenceid=11, filesize=6.9 K 2024-11-16T19:27:48,366 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/hbase/meta/1588230740/.tmp/ns/fa601e63a6894f5c9763a60a2008f5d5 as hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/hbase/meta/1588230740/ns/fa601e63a6894f5c9763a60a2008f5d5 2024-11-16T19:27:48,375 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/hbase/meta/1588230740/ns/fa601e63a6894f5c9763a60a2008f5d5, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T19:27:48,376 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/hbase/meta/1588230740/.tmp/table/d13bfed31ced4a6dbb3c91d0f9656b65 as hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/hbase/meta/1588230740/table/d13bfed31ced4a6dbb3c91d0f9656b65 2024-11-16T19:27:48,385 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/hbase/meta/1588230740/table/d13bfed31ced4a6dbb3c91d0f9656b65, entries=2, sequenceid=11, filesize=5.3 K 2024-11-16T19:27:48,386 INFO 
[RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 136ms, sequenceid=11, compaction requested=false 2024-11-16T19:27:48,393 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T19:27:48,394 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T19:27:48,394 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T19:27:48,394 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731785268250Running coprocessor pre-close hooks at 1731785268250Disabling compacts and flushes for region at 1731785268250Disabling writes for close at 1731785268250Obtaining lock to block concurrent updates at 1731785268250Preparing flush snapshotting stores in 1588230740 at 1731785268250Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731785268251 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731785268252 (+1 ms)Flushing 1588230740/info: creating writer at 1731785268252Flushing 1588230740/info: appending metadata at 1731785268272 (+20 ms)Flushing 1588230740/info: closing flushed file at 1731785268272Flushing 1588230740/ns: creating writer at 1731785268287 (+15 ms)Flushing 1588230740/ns: appending metadata at 1731785268303 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731785268303Flushing 1588230740/table: creating writer at 1731785268321 (+18 ms)Flushing 1588230740/table: appending metadata at 1731785268335 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731785268335Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19f41690: reopening flushed file at 1731785268352 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@50d70409: reopening flushed file at 1731785268364 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65134d1f: reopening flushed file at 1731785268375 (+11 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 136ms, sequenceid=11, compaction requested=false at 1731785268386 (+11 ms)Writing region close event to WAL at 1731785268388 (+2 ms)Running coprocessor post-close hooks at 1731785268394 (+6 ms)Closed at 1731785268394 2024-11-16T19:27:48,394 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T19:27:48,451 INFO [RS:0;d11ab77873cb:34913 {}] regionserver.HRegionServer(976): stopping server d11ab77873cb,34913,1731785178695; all regions closed. 
2024-11-16T19:27:48,453 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:48,453 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:48,453 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:48,453 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:48,453 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:48,454 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T19:27:48,455 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T19:27:48,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741834_1010 (size=3066) 2024-11-16T19:27:48,456 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-16T19:27:48,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741834_1010 (size=3066) 2024-11-16T19:27:48,461 DEBUG [RS:0;d11ab77873cb:34913 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/oldWALs 2024-11-16T19:27:48,461 INFO [RS:0;d11ab77873cb:34913 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d11ab77873cb%2C34913%2C1731785178695.meta:.meta(num 1731785180323) 2024-11-16T19:27:48,462 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:48,462 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:48,462 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:48,462 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:48,462 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:48,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741846_1022 (size=12695) 2024-11-16T19:27:48,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741846_1022 (size=12695) 2024-11-16T19:27:48,469 DEBUG [RS:0;d11ab77873cb:34913 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/oldWALs 2024-11-16T19:27:48,469 INFO [RS:0;d11ab77873cb:34913 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d11ab77873cb%2C34913%2C1731785178695:(num 1731785248180) 2024-11-16T19:27:48,469 DEBUG [RS:0;d11ab77873cb:34913 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:27:48,469 INFO [RS:0;d11ab77873cb:34913 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T19:27:48,469 INFO [RS:0;d11ab77873cb:34913 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T19:27:48,469 INFO [RS:0;d11ab77873cb:34913 {}] hbase.ChoreService(370): Chore service for: regionserver/d11ab77873cb:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, 
unit=MILLISECONDS] on shutdown 2024-11-16T19:27:48,470 INFO [RS:0;d11ab77873cb:34913 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T19:27:48,470 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T19:27:48,470 INFO [RS:0;d11ab77873cb:34913 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34913 2024-11-16T19:27:48,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T19:27:48,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34913-0x1004a0015560001, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d11ab77873cb,34913,1731785178695 2024-11-16T19:27:48,473 INFO [RS:0;d11ab77873cb:34913 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T19:27:48,474 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d11ab77873cb,34913,1731785178695] 2024-11-16T19:27:48,475 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d11ab77873cb,34913,1731785178695 already deleted, retry=false 2024-11-16T19:27:48,475 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d11ab77873cb,34913,1731785178695 expired; onlineServers=0 2024-11-16T19:27:48,476 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd11ab77873cb,41597,1731785178126' ***** 2024-11-16T19:27:48,476 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T19:27:48,476 INFO [M:0;d11ab77873cb:41597 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T19:27:48,476 INFO [M:0;d11ab77873cb:41597 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T19:27:48,476 DEBUG [M:0;d11ab77873cb:41597 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T19:27:48,476 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
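The ZooKeeper events above show how the master learns the region server is gone: the RS's ephemeral znode under /hbase/rs disappears when its session closes, the watcher fires NodeDeleted, and RegionServerTracker processes the expiration. A minimal sketch of that watch pattern using the plain ZooKeeper client; the connection string and znode path are taken from the log, but the code itself is an illustrative assumption, not HBase's own tracker implementation:

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch deleted = new CountDownLatch(1);
    String rsZnode = "/hbase/rs/d11ab77873cb,34913,1731785178695";

    Watcher watcher = (WatchedEvent event) -> {
      // Fires when the ephemeral RS znode disappears, i.e. when the region
      // server's ZooKeeper session ends during shutdown.
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && rsZnode.equals(event.getPath())) {
        deleted.countDown();
      }
    };

    ZooKeeper zk = new ZooKeeper("127.0.0.1:55736", 30_000, watcher);
    // exists() registers the watch whether or not the node is currently
    // present, mirroring "Set watcher on znode that does not yet exist" above.
    zk.exists(rsZnode, true);

    deleted.await();
    System.out.println("Region server znode deleted: " + rsZnode);
    zk.close();
  }
}
```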
2024-11-16T19:27:48,476 DEBUG [M:0;d11ab77873cb:41597 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T19:27:48,476 DEBUG [master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.large.0-1731785179587 {}] cleaner.HFileCleaner(306): Exit Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.large.0-1731785179587,5,FailOnTimeoutGroup] 2024-11-16T19:27:48,476 DEBUG [master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.small.0-1731785179593 {}] cleaner.HFileCleaner(306): Exit Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.small.0-1731785179593,5,FailOnTimeoutGroup] 2024-11-16T19:27:48,476 INFO [M:0;d11ab77873cb:41597 {}] hbase.ChoreService(370): Chore service for: master/d11ab77873cb:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T19:27:48,477 INFO [M:0;d11ab77873cb:41597 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T19:27:48,477 DEBUG [M:0;d11ab77873cb:41597 {}] master.HMaster(1795): Stopping service threads 2024-11-16T19:27:48,477 INFO [M:0;d11ab77873cb:41597 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T19:27:48,477 INFO [M:0;d11ab77873cb:41597 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T19:27:48,478 INFO [M:0;d11ab77873cb:41597 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T19:27:48,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T19:27:48,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:48,478 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-16T19:27:48,478 DEBUG [M:0;d11ab77873cb:41597 {}] zookeeper.ZKUtil(347): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T19:27:48,478 WARN [M:0;d11ab77873cb:41597 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T19:27:48,479 INFO [M:0;d11ab77873cb:41597 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/.lastflushedseqids 2024-11-16T19:27:48,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741853_1029 (size=130) 2024-11-16T19:27:48,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741853_1029 (size=130) 2024-11-16T19:27:48,491 INFO [M:0;d11ab77873cb:41597 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T19:27:48,491 INFO [M:0;d11ab77873cb:41597 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T19:27:48,492 DEBUG [M:0;d11ab77873cb:41597 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T19:27:48,492 INFO [M:0;d11ab77873cb:41597 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:27:48,492 DEBUG [M:0;d11ab77873cb:41597 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:27:48,492 DEBUG [M:0;d11ab77873cb:41597 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T19:27:48,492 DEBUG [M:0;d11ab77873cb:41597 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T19:27:48,492 INFO [M:0;d11ab77873cb:41597 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-16T19:27:48,513 DEBUG [M:0;d11ab77873cb:41597 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc60b56192fc45c3963e241da85cde98 is 82, key is hbase:meta,,1/info:regioninfo/1731785180398/Put/seqid=0 2024-11-16T19:27:48,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741854_1030 (size=5672) 2024-11-16T19:27:48,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741854_1030 (size=5672) 2024-11-16T19:27:48,520 INFO [M:0;d11ab77873cb:41597 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc60b56192fc45c3963e241da85cde98 2024-11-16T19:27:48,543 DEBUG [M:0;d11ab77873cb:41597 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/335281fe682c4263ab17d589fc2ce4f3 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731785181169/Put/seqid=0 2024-11-16T19:27:48,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741855_1031 (size=6247) 2024-11-16T19:27:48,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741855_1031 (size=6247) 2024-11-16T19:27:48,550 INFO [M:0;d11ab77873cb:41597 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/335281fe682c4263ab17d589fc2ce4f3 2024-11-16T19:27:48,557 INFO [M:0;d11ab77873cb:41597 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 335281fe682c4263ab17d589fc2ce4f3 2024-11-16T19:27:48,574 DEBUG [M:0;d11ab77873cb:41597 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/88b27ad186514af78bb096019495e227 is 69, key is d11ab77873cb,34913,1731785178695/rs:state/1731785179609/Put/seqid=0 2024-11-16T19:27:48,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34913-0x1004a0015560001, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:27:48,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34913-0x1004a0015560001, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:27:48,576 INFO [RS:0;d11ab77873cb:34913 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T19:27:48,576 INFO [RS:0;d11ab77873cb:34913 {}] 
regionserver.HRegionServer(1031): Exiting; stopping=d11ab77873cb,34913,1731785178695; zookeeper connection closed. 2024-11-16T19:27:48,577 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@33fb88f7 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@33fb88f7 2024-11-16T19:27:48,577 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T19:27:48,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741856_1032 (size=5156) 2024-11-16T19:27:48,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741856_1032 (size=5156) 2024-11-16T19:27:48,982 INFO [M:0;d11ab77873cb:41597 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/88b27ad186514af78bb096019495e227 2024-11-16T19:27:49,012 DEBUG [M:0;d11ab77873cb:41597 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/63dec6a47b3d4cb788212e3280f41d52 is 52, key is load_balancer_on/state:d/1731785180656/Put/seqid=0 2024-11-16T19:27:49,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741857_1033 (size=5056) 2024-11-16T19:27:49,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741857_1033 (size=5056) 2024-11-16T19:27:49,019 INFO [M:0;d11ab77873cb:41597 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/63dec6a47b3d4cb788212e3280f41d52 2024-11-16T19:27:49,026 DEBUG [M:0;d11ab77873cb:41597 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc60b56192fc45c3963e241da85cde98 as hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cc60b56192fc45c3963e241da85cde98 2024-11-16T19:27:49,032 INFO [M:0;d11ab77873cb:41597 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cc60b56192fc45c3963e241da85cde98, entries=8, sequenceid=59, filesize=5.5 K 2024-11-16T19:27:49,034 DEBUG [M:0;d11ab77873cb:41597 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/335281fe682c4263ab17d589fc2ce4f3 as hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/335281fe682c4263ab17d589fc2ce4f3 
2024-11-16T19:27:49,040 INFO [M:0;d11ab77873cb:41597 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 335281fe682c4263ab17d589fc2ce4f3 2024-11-16T19:27:49,041 INFO [M:0;d11ab77873cb:41597 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/335281fe682c4263ab17d589fc2ce4f3, entries=6, sequenceid=59, filesize=6.1 K 2024-11-16T19:27:49,042 DEBUG [M:0;d11ab77873cb:41597 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/88b27ad186514af78bb096019495e227 as hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/88b27ad186514af78bb096019495e227 2024-11-16T19:27:49,048 INFO [M:0;d11ab77873cb:41597 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/88b27ad186514af78bb096019495e227, entries=1, sequenceid=59, filesize=5.0 K 2024-11-16T19:27:49,049 DEBUG [M:0;d11ab77873cb:41597 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/63dec6a47b3d4cb788212e3280f41d52 as hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/63dec6a47b3d4cb788212e3280f41d52 2024-11-16T19:27:49,056 INFO [M:0;d11ab77873cb:41597 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/63dec6a47b3d4cb788212e3280f41d52, entries=1, sequenceid=59, filesize=4.9 K 2024-11-16T19:27:49,058 INFO [M:0;d11ab77873cb:41597 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 565ms, sequenceid=59, compaction requested=false 2024-11-16T19:27:49,059 INFO [M:0;d11ab77873cb:41597 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:27:49,059 DEBUG [M:0;d11ab77873cb:41597 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731785268492Disabling compacts and flushes for region at 1731785268492Disabling writes for close at 1731785268492Obtaining lock to block concurrent updates at 1731785268492Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731785268492Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731785268493 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731785268494 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731785268494Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731785268512 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731785268512Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731785268528 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731785268543 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731785268543Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731785268557 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731785268573 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731785268573Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731785268994 (+421 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731785269012 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731785269012Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@556923b1: reopening flushed file at 1731785269025 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ef0d39e: reopening flushed file at 1731785269033 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a92ce3: reopening flushed file at 1731785269041 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c7e2316: reopening flushed file at 1731785269048 (+7 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 565ms, sequenceid=59, compaction requested=false at 1731785269058 (+10 ms)Writing region close event to WAL at 1731785269059 (+1 ms)Closed at 1731785269059 2024-11-16T19:27:49,060 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:49,060 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:49,060 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:49,060 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:49,061 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:49,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41287 is added to blk_1073741830_1006 (size=27973) 2024-11-16T19:27:49,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39641 is added to blk_1073741830_1006 (size=27973) 2024-11-16T19:27:49,064 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T19:27:49,064 INFO [M:0;d11ab77873cb:41597 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-16T19:27:49,064 INFO [M:0;d11ab77873cb:41597 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41597 2024-11-16T19:27:49,064 INFO [M:0;d11ab77873cb:41597 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T19:27:49,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:27:49,166 INFO [M:0;d11ab77873cb:41597 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T19:27:49,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41597-0x1004a0015560000, quorum=127.0.0.1:55736, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:27:49,173 WARN [BP-1946553720-172.17.0.2-1731785175328 heartbeating to localhost/127.0.0.1:33243 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1946553720-172.17.0.2-1731785175328 (Datanode Uuid 840e330b-ce22-48ad-9b96-5a95a2354a56) service to localhost/127.0.0.1:33243 2024-11-16T19:27:49,175 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:27:49,176 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/cluster_3002c0b6-5f11-156d-a8b9-03e0a7e62c10/data/data3/current/BP-1946553720-172.17.0.2-1731785175328 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:27:49,176 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/cluster_3002c0b6-5f11-156d-a8b9-03e0a7e62c10/data/data4/current/BP-1946553720-172.17.0.2-1731785175328 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:27:49,178 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:27:49,178 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:27:49,178 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:27:49,178 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/hadoop.log.dir/,STOPPED} 2024-11-16T19:27:49,181 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T19:27:49,183 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:27:49,183 INFO 
[Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:27:49,183 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:27:49,184 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:27:49,184 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/hadoop.log.dir/,STOPPED} 2024-11-16T19:27:49,185 WARN [BP-1946553720-172.17.0.2-1731785175328 heartbeating to localhost/127.0.0.1:33243 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T19:27:49,185 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T19:27:49,185 WARN [BP-1946553720-172.17.0.2-1731785175328 heartbeating to localhost/127.0.0.1:33243 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1946553720-172.17.0.2-1731785175328 (Datanode Uuid 114de53b-21e8-42bb-a5d2-50fe379859ed) service to localhost/127.0.0.1:33243 2024-11-16T19:27:49,185 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T19:27:49,186 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/cluster_3002c0b6-5f11-156d-a8b9-03e0a7e62c10/data/data1/current/BP-1946553720-172.17.0.2-1731785175328 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:27:49,186 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/cluster_3002c0b6-5f11-156d-a8b9-03e0a7e62c10/data/data2/current/BP-1946553720-172.17.0.2-1731785175328 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:27:49,187 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T19:27:49,196 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T19:27:49,197 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:27:49,197 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:27:49,197 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:27:49,197 INFO 
[Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/hadoop.log.dir/,STOPPED} 2024-11-16T19:27:49,206 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T19:27:49,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T19:27:49,244 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=79 (was 12) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:33243 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/d11ab77873cb:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@44be2bb java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33243 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33243 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33243 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33243 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/d11ab77873cb:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: master/d11ab77873cb:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33243 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33243 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: regionserver/d11ab77873cb:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33243 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=405 (was 287) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=161 (was 378), ProcessCount=11 (was 11), AvailableMemoryMB=2825 (was 3611) 2024-11-16T19:27:49,250 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=80, OpenFileDescriptor=405, MaxFileDescriptor=1048576, SystemLoadAverage=161, ProcessCount=11, AvailableMemoryMB=2825 2024-11-16T19:27:49,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T19:27:49,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/hadoop.log.dir so I do NOT create it in target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90 2024-11-16T19:27:49,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f3b526b-b851-f233-7dc4-6b73c8295996/hadoop.tmp.dir so I do NOT create it in target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90 2024-11-16T19:27:49,251 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/cluster_b5a9424a-2b64-4c46-3399-927841de40ac, deleteOnExit=true 2024-11-16T19:27:49,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T19:27:49,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/test.cache.data in system properties and HBase conf 2024-11-16T19:27:49,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T19:27:49,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/hadoop.log.dir in system properties and HBase conf 2024-11-16T19:27:49,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T19:27:49,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T19:27:49,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T19:27:49,251 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-16T19:27:49,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T19:27:49,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T19:27:49,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T19:27:49,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T19:27:49,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T19:27:49,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T19:27:49,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T19:27:49,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T19:27:49,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T19:27:49,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/nfs.dump.dir in system properties and HBase conf 2024-11-16T19:27:49,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/java.io.tmpdir in system properties and HBase conf 2024-11-16T19:27:49,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T19:27:49,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T19:27:49,253 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T19:27:49,265 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T19:27:49,321 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:27:49,330 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:27:49,333 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:27:49,333 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:27:49,333 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T19:27:49,334 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:27:49,335 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bd9c5b4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:27:49,335 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a3c3ceb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:27:49,431 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@52b0c086{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/java.io.tmpdir/jetty-localhost-43765-hadoop-hdfs-3_4_1-tests_jar-_-any-4684476155907445704/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T19:27:49,432 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4d9de743{HTTP/1.1, (http/1.1)}{localhost:43765} 2024-11-16T19:27:49,432 INFO [Time-limited test {}] server.Server(415): Started @96086ms 2024-11-16T19:27:49,443 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T19:27:49,492 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:27:49,497 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:27:49,498 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:27:49,498 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:27:49,498 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T19:27:49,499 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55f7876e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:27:49,499 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@607b9bc6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:27:49,596 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30a1c2a3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/java.io.tmpdir/jetty-localhost-38427-hadoop-hdfs-3_4_1-tests_jar-_-any-13816486490771923796/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:27:49,596 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4620cd8a{HTTP/1.1, (http/1.1)}{localhost:38427} 2024-11-16T19:27:49,596 INFO [Time-limited test {}] server.Server(415): Started @96250ms 2024-11-16T19:27:49,598 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T19:27:49,626 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:27:49,630 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:27:49,630 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:27:49,630 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:27:49,631 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T19:27:49,631 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@463a48f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:27:49,631 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d944f53{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:27:49,664 WARN [Thread-431 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/cluster_b5a9424a-2b64-4c46-3399-927841de40ac/data/data1/current/BP-1042754415-172.17.0.2-1731785269281/current, will proceed with Du for space computation calculation, 2024-11-16T19:27:49,664 WARN [Thread-432 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/cluster_b5a9424a-2b64-4c46-3399-927841de40ac/data/data2/current/BP-1042754415-172.17.0.2-1731785269281/current, will proceed with Du for space computation calculation, 2024-11-16T19:27:49,683 WARN [Thread-410 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T19:27:49,686 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf93dddf7dd9915f0 with lease ID 0x847f55a5ae218f3f: Processing first storage report for DS-2958178f-bea7-4835-b8eb-9fdca0f38b7e from datanode DatanodeRegistration(127.0.0.1:42519, datanodeUuid=f6365867-f6d9-48b6-ab57-6985e0eb94ba, infoPort=41679, infoSecurePort=0, ipcPort=40493, storageInfo=lv=-57;cid=testClusterID;nsid=241958006;c=1731785269281) 2024-11-16T19:27:49,687 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf93dddf7dd9915f0 with lease ID 0x847f55a5ae218f3f: from storage DS-2958178f-bea7-4835-b8eb-9fdca0f38b7e node DatanodeRegistration(127.0.0.1:42519, datanodeUuid=f6365867-f6d9-48b6-ab57-6985e0eb94ba, infoPort=41679, infoSecurePort=0, ipcPort=40493, storageInfo=lv=-57;cid=testClusterID;nsid=241958006;c=1731785269281), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:27:49,687 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf93dddf7dd9915f0 with lease ID 0x847f55a5ae218f3f: Processing first storage report for DS-c07c485a-6a91-4d5a-95a7-604dfe979d29 from datanode DatanodeRegistration(127.0.0.1:42519, datanodeUuid=f6365867-f6d9-48b6-ab57-6985e0eb94ba, infoPort=41679, infoSecurePort=0, ipcPort=40493, storageInfo=lv=-57;cid=testClusterID;nsid=241958006;c=1731785269281) 2024-11-16T19:27:49,687 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf93dddf7dd9915f0 with lease ID 0x847f55a5ae218f3f: from storage DS-c07c485a-6a91-4d5a-95a7-604dfe979d29 node DatanodeRegistration(127.0.0.1:42519, datanodeUuid=f6365867-f6d9-48b6-ab57-6985e0eb94ba, infoPort=41679, infoSecurePort=0, ipcPort=40493, storageInfo=lv=-57;cid=testClusterID;nsid=241958006;c=1731785269281), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T19:27:49,705 INFO [regionserver/d11ab77873cb:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T19:27:49,734 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@c708570{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/java.io.tmpdir/jetty-localhost-39197-hadoop-hdfs-3_4_1-tests_jar-_-any-12801030544091429217/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:27:49,735 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7ab06e68{HTTP/1.1, (http/1.1)}{localhost:39197} 2024-11-16T19:27:49,735 INFO [Time-limited test {}] server.Server(415): Started @96389ms 2024-11-16T19:27:49,736 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-16T19:27:49,797 WARN [Thread-458 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/cluster_b5a9424a-2b64-4c46-3399-927841de40ac/data/data4/current/BP-1042754415-172.17.0.2-1731785269281/current, will proceed with Du for space computation calculation, 2024-11-16T19:27:49,797 WARN [Thread-457 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/cluster_b5a9424a-2b64-4c46-3399-927841de40ac/data/data3/current/BP-1042754415-172.17.0.2-1731785269281/current, will proceed with Du for space computation calculation, 2024-11-16T19:27:49,814 WARN [Thread-446 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T19:27:49,816 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x90628f57236605 with lease ID 0x847f55a5ae218f40: Processing first storage report for DS-d70fea26-99db-469a-93d5-09cc490685c2 from datanode DatanodeRegistration(127.0.0.1:35145, datanodeUuid=b870fa3e-51be-4adb-b923-90e3c402411e, infoPort=46393, infoSecurePort=0, ipcPort=38065, storageInfo=lv=-57;cid=testClusterID;nsid=241958006;c=1731785269281) 2024-11-16T19:27:49,816 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x90628f57236605 with lease ID 0x847f55a5ae218f40: from storage DS-d70fea26-99db-469a-93d5-09cc490685c2 node DatanodeRegistration(127.0.0.1:35145, datanodeUuid=b870fa3e-51be-4adb-b923-90e3c402411e, infoPort=46393, infoSecurePort=0, ipcPort=38065, storageInfo=lv=-57;cid=testClusterID;nsid=241958006;c=1731785269281), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:27:49,816 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x90628f57236605 with lease ID 0x847f55a5ae218f40: Processing first storage report for DS-28d15717-4263-465e-9468-3bfb0fdd3428 from datanode DatanodeRegistration(127.0.0.1:35145, datanodeUuid=b870fa3e-51be-4adb-b923-90e3c402411e, infoPort=46393, infoSecurePort=0, ipcPort=38065, storageInfo=lv=-57;cid=testClusterID;nsid=241958006;c=1731785269281) 2024-11-16T19:27:49,816 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x90628f57236605 with lease ID 0x847f55a5ae218f40: from storage DS-28d15717-4263-465e-9468-3bfb0fdd3428 node DatanodeRegistration(127.0.0.1:35145, datanodeUuid=b870fa3e-51be-4adb-b923-90e3c402411e, infoPort=46393, infoSecurePort=0, ipcPort=38065, storageInfo=lv=-57;cid=testClusterID;nsid=241958006;c=1731785269281), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:27:49,866 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90 2024-11-16T19:27:49,869 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/cluster_b5a9424a-2b64-4c46-3399-927841de40ac/zookeeper_0, clientPort=57302, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/cluster_b5a9424a-2b64-4c46-3399-927841de40ac/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/cluster_b5a9424a-2b64-4c46-3399-927841de40ac/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T19:27:49,870 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57302 2024-11-16T19:27:49,870 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:27:49,872 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:27:49,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42519 is added to blk_1073741825_1001 (size=7) 2024-11-16T19:27:49,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35145 is added to blk_1073741825_1001 (size=7) 2024-11-16T19:27:49,884 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26 with version=8 2024-11-16T19:27:49,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/hbase-staging 2024-11-16T19:27:49,887 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d11ab77873cb:0 server-side Connection retries=45 2024-11-16T19:27:49,887 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:27:49,887 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T19:27:49,887 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T19:27:49,887 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:27:49,887 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T19:27:49,887 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T19:27:49,887 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T19:27:49,888 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40407 2024-11-16T19:27:49,890 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40407 connecting to ZooKeeper ensemble=127.0.0.1:57302 2024-11-16T19:27:49,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:404070x0, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T19:27:49,894 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40407-0x1004a017edb0000 connected 2024-11-16T19:27:49,905 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:27:49,907 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:27:49,911 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:27:49,911 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26, hbase.cluster.distributed=false 2024-11-16T19:27:49,913 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T19:27:49,914 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40407 2024-11-16T19:27:49,914 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40407 2024-11-16T19:27:49,914 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40407 2024-11-16T19:27:49,914 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40407 2024-11-16T19:27:49,915 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40407 2024-11-16T19:27:49,930 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d11ab77873cb:0 server-side Connection retries=45 2024-11-16T19:27:49,930 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:27:49,930 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T19:27:49,930 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T19:27:49,930 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:27:49,930 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T19:27:49,930 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T19:27:49,931 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T19:27:49,931 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38077 2024-11-16T19:27:49,933 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38077 connecting to ZooKeeper ensemble=127.0.0.1:57302 2024-11-16T19:27:49,933 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:27:49,935 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:27:49,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:380770x0, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T19:27:49,940 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38077-0x1004a017edb0001 connected 2024-11-16T19:27:49,940 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38077-0x1004a017edb0001, quorum=127.0.0.1:57302, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:27:49,940 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T19:27:49,941 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T19:27:49,941 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38077-0x1004a017edb0001, quorum=127.0.0.1:57302, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T19:27:49,942 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38077-0x1004a017edb0001, quorum=127.0.0.1:57302, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T19:27:49,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38077 2024-11-16T19:27:49,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38077 2024-11-16T19:27:49,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38077 2024-11-16T19:27:49,944 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38077 2024-11-16T19:27:49,944 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38077 
2024-11-16T19:27:49,959 DEBUG [M:0;d11ab77873cb:40407 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d11ab77873cb:40407 2024-11-16T19:27:49,960 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d11ab77873cb,40407,1731785269886 2024-11-16T19:27:49,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38077-0x1004a017edb0001, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:27:49,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:27:49,962 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d11ab77873cb,40407,1731785269886 2024-11-16T19:27:49,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:49,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38077-0x1004a017edb0001, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T19:27:49,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38077-0x1004a017edb0001, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:49,963 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T19:27:49,964 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d11ab77873cb,40407,1731785269886 from backup master directory 2024-11-16T19:27:49,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d11ab77873cb,40407,1731785269886 2024-11-16T19:27:49,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:27:49,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38077-0x1004a017edb0001, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:27:49,965 WARN [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-16T19:27:49,965 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d11ab77873cb,40407,1731785269886 2024-11-16T19:27:49,969 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/hbase.id] with ID: 8404e94a-b877-4e19-97ae-03f1bde7564a 2024-11-16T19:27:49,969 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/.tmp/hbase.id 2024-11-16T19:27:49,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42519 is added to blk_1073741826_1002 (size=42) 2024-11-16T19:27:49,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35145 is added to blk_1073741826_1002 (size=42) 2024-11-16T19:27:49,978 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/.tmp/hbase.id]:[hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/hbase.id] 2024-11-16T19:27:49,995 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:27:49,996 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T19:27:49,997 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-16T19:27:49,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38077-0x1004a017edb0001, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:49,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:50,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35145 is added to blk_1073741827_1003 (size=196) 2024-11-16T19:27:50,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42519 is added to blk_1073741827_1003 (size=196) 2024-11-16T19:27:50,009 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T19:27:50,009 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T19:27:50,010 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T19:27:50,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42519 is added to blk_1073741828_1004 (size=1189) 2024-11-16T19:27:50,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35145 is added to blk_1073741828_1004 (size=1189) 2024-11-16T19:27:50,020 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store 2024-11-16T19:27:50,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42519 is added to blk_1073741829_1005 (size=34) 2024-11-16T19:27:50,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35145 is added to blk_1073741829_1005 (size=34) 2024-11-16T19:27:50,029 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:27:50,029 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T19:27:50,029 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:27:50,029 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:27:50,029 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T19:27:50,029 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:27:50,029 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T19:27:50,030 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731785270029Disabling compacts and flushes for region at 1731785270029Disabling writes for close at 1731785270029Writing region close event to WAL at 1731785270029Closed at 1731785270029 2024-11-16T19:27:50,031 WARN [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/.initializing 2024-11-16T19:27:50,031 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/WALs/d11ab77873cb,40407,1731785269886 2024-11-16T19:27:50,035 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d11ab77873cb%2C40407%2C1731785269886, suffix=, logDir=hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/WALs/d11ab77873cb,40407,1731785269886, archiveDir=hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/oldWALs, maxLogs=10 2024-11-16T19:27:50,036 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C40407%2C1731785269886.1731785270035 2024-11-16T19:27:50,042 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/WALs/d11ab77873cb,40407,1731785269886/d11ab77873cb%2C40407%2C1731785269886.1731785270035 2024-11-16T19:27:50,046 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46393:46393),(127.0.0.1/127.0.0.1:41679:41679)] 2024-11-16T19:27:50,046 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T19:27:50,047 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:27:50,047 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:50,047 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:50,049 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:50,050 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T19:27:50,051 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:50,051 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:27:50,051 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:50,053 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T19:27:50,053 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:50,054 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:27:50,054 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:50,056 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T19:27:50,056 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:50,057 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:27:50,057 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:50,059 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T19:27:50,059 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:50,060 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:27:50,060 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:50,061 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:50,062 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:50,063 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:50,064 DEBUG [master/d11ab77873cb:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:50,064 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T19:27:50,066 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:50,069 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T19:27:50,070 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=809988, jitterRate=0.029953911900520325}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T19:27:50,071 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731785270047Initializing all the Stores at 1731785270048 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785270048Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785270049 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785270049Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785270049Cleaning up temporary data from old regions at 1731785270064 (+15 ms)Region opened successfully at 1731785270071 (+7 ms) 2024-11-16T19:27:50,072 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T19:27:50,077 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29b4a233, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d11ab77873cb/172.17.0.2:0 2024-11-16T19:27:50,078 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T19:27:50,078 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T19:27:50,078 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T19:27:50,079 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T19:27:50,079 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T19:27:50,080 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T19:27:50,080 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T19:27:50,083 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T19:27:50,084 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T19:27:50,085 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T19:27:50,086 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T19:27:50,087 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T19:27:50,088 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T19:27:50,088 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T19:27:50,090 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T19:27:50,091 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T19:27:50,092 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T19:27:50,093 DEBUG 
[master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T19:27:50,095 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T19:27:50,096 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T19:27:50,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38077-0x1004a017edb0001, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T19:27:50,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T19:27:50,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:50,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38077-0x1004a017edb0001, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:50,098 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d11ab77873cb,40407,1731785269886, sessionid=0x1004a017edb0000, setting cluster-up flag (Was=false) 2024-11-16T19:27:50,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:50,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38077-0x1004a017edb0001, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:50,105 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T19:27:50,106 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d11ab77873cb,40407,1731785269886 2024-11-16T19:27:50,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:50,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38077-0x1004a017edb0001, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:50,113 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T19:27:50,114 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d11ab77873cb,40407,1731785269886 2024-11-16T19:27:50,115 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T19:27:50,117 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T19:27:50,117 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T19:27:50,117 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T19:27:50,117 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d11ab77873cb,40407,1731785269886 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T19:27:50,119 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:27:50,119 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:27:50,119 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:27:50,120 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:27:50,120 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d11ab77873cb:0, corePoolSize=10, maxPoolSize=10 2024-11-16T19:27:50,120 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:50,120 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d11ab77873cb:0, corePoolSize=2, maxPoolSize=2 2024-11-16T19:27:50,120 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d11ab77873cb:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T19:27:50,123 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T19:27:50,123 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T19:27:50,125 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:50,125 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731785300125 2024-11-16T19:27:50,125 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T19:27:50,125 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T19:27:50,125 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T19:27:50,125 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T19:27:50,125 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T19:27:50,125 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T19:27:50,126 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T19:27:50,126 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:50,129 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T19:27:50,129 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T19:27:50,129 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T19:27:50,130 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T19:27:50,130 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T19:27:50,130 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.large.0-1731785270130,5,FailOnTimeoutGroup] 2024-11-16T19:27:50,130 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.small.0-1731785270130,5,FailOnTimeoutGroup] 2024-11-16T19:27:50,130 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:50,130 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T19:27:50,130 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:50,131 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
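[editor's note] The HMaster(1741) line above reports that reopening regions with a very high store file reference count stays disabled until a positive threshold is configured. A minimal sketch of setting that property programmatically before bringing a cluster up; the key name is quoted from the log line itself, while the threshold value 3 is only an illustration:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class RefCountRecoveryConfig {
    public static void main(String[] args) {
      // Start from the default HBase configuration (hbase-default.xml + hbase-site.xml).
      Configuration conf = HBaseConfiguration.create();
      // Any value > 0 enables reopening regions whose store file reference count
      // exceeds the threshold; the key is the one named in the log line above.
      conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
      System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
    }
  }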
2024-11-16T19:27:50,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42519 is added to blk_1073741831_1007 (size=1321) 2024-11-16T19:27:50,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35145 is added to blk_1073741831_1007 (size=1321) 2024-11-16T19:27:50,139 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T19:27:50,140 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26 2024-11-16T19:27:50,146 INFO [RS:0;d11ab77873cb:38077 {}] regionserver.HRegionServer(746): ClusterId : 8404e94a-b877-4e19-97ae-03f1bde7564a 2024-11-16T19:27:50,146 DEBUG [RS:0;d11ab77873cb:38077 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T19:27:50,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42519 is added to blk_1073741832_1008 (size=32) 2024-11-16T19:27:50,153 DEBUG [RS:0;d11ab77873cb:38077 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T19:27:50,153 DEBUG [RS:0;d11ab77873cb:38077 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T19:27:50,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35145 is added to blk_1073741832_1008 (size=32) 2024-11-16T19:27:50,155 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 
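[editor's note] The descriptor printed above (families info/ns/rep_barrier/table with ROWCOL bloom filters, ROW_INDEX_V1 encoding, in-memory caching and small block sizes) is built internally for hbase:meta. A comparable descriptor for an ordinary user table could be assembled with the public builder API roughly as below; the table name "example" and the single family are illustrative, only the attribute values mirror the log:

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
  import org.apache.hadoop.hbase.regionserver.BloomType;
  import org.apache.hadoop.hbase.util.Bytes;

  public class MetaLikeDescriptor {
    public static void main(String[] args) {
      // Attribute values mirror the 'info' family shown in the descriptor above.
      TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(3)                                   // VERSIONS => '3'
              .setBloomFilterType(BloomType.ROWCOL)                // BLOOMFILTER => 'ROWCOL'
              .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
              .setInMemory(true)                                   // IN_MEMORY => 'true'
              .setBlocksize(8 * 1024)                              // BLOCKSIZE => '8192'
              .build())
          .build();
      System.out.println(td);
    }
  }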
2024-11-16T19:27:50,155 DEBUG [RS:0;d11ab77873cb:38077 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T19:27:50,155 DEBUG [RS:0;d11ab77873cb:38077 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5610c897, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d11ab77873cb/172.17.0.2:0 2024-11-16T19:27:50,157 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T19:27:50,161 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T19:27:50,161 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:50,162 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:27:50,162 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T19:27:50,164 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T19:27:50,164 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:50,165 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:27:50,165 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T19:27:50,167 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T19:27:50,167 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:50,168 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:27:50,169 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T19:27:50,171 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T19:27:50,171 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:50,172 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:27:50,172 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T19:27:50,172 DEBUG [RS:0;d11ab77873cb:38077 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d11ab77873cb:38077 2024-11-16T19:27:50,173 INFO [RS:0;d11ab77873cb:38077 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor 
loading is enabled 2024-11-16T19:27:50,173 INFO [RS:0;d11ab77873cb:38077 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T19:27:50,173 DEBUG [RS:0;d11ab77873cb:38077 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-16T19:27:50,173 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/data/hbase/meta/1588230740 2024-11-16T19:27:50,174 INFO [RS:0;d11ab77873cb:38077 {}] regionserver.HRegionServer(2659): reportForDuty to master=d11ab77873cb,40407,1731785269886 with port=38077, startcode=1731785269930 2024-11-16T19:27:50,174 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/data/hbase/meta/1588230740 2024-11-16T19:27:50,174 DEBUG [RS:0;d11ab77873cb:38077 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T19:27:50,176 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T19:27:50,176 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T19:27:50,177 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T19:27:50,178 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37957, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T19:27:50,179 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T19:27:50,180 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40407 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d11ab77873cb,38077,1731785269930 2024-11-16T19:27:50,180 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40407 {}] master.ServerManager(517): Registering regionserver=d11ab77873cb,38077,1731785269930 2024-11-16T19:27:50,183 DEBUG [RS:0;d11ab77873cb:38077 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26 2024-11-16T19:27:50,183 DEBUG [RS:0;d11ab77873cb:38077 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36261 2024-11-16T19:27:50,183 DEBUG [RS:0;d11ab77873cb:38077 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T19:27:50,183 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T19:27:50,184 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=767097, jitterRate=-0.02458612620830536}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T19:27:50,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T19:27:50,186 DEBUG [RS:0;d11ab77873cb:38077 {}] zookeeper.ZKUtil(111): regionserver:38077-0x1004a017edb0001, quorum=127.0.0.1:57302, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d11ab77873cb,38077,1731785269930 2024-11-16T19:27:50,186 WARN [RS:0;d11ab77873cb:38077 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T19:27:50,186 INFO [RS:0;d11ab77873cb:38077 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T19:27:50,186 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731785270155Initializing all the Stores at 1731785270156 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785270157 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785270157Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785270157Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785270157Cleaning up temporary data from old regions at 1731785270176 (+19 ms)Region opened successfully at 1731785270186 (+10 ms) 2024-11-16T19:27:50,186 DEBUG [RS:0;d11ab77873cb:38077 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/WALs/d11ab77873cb,38077,1731785269930 2024-11-16T19:27:50,186 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T19:27:50,186 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T19:27:50,186 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T19:27:50,187 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T19:27:50,187 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T19:27:50,187 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T19:27:50,187 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731785270186Disabling compacts and flushes for region at 1731785270186Disabling writes for close at 1731785270187 (+1 ms)Writing region close event to WAL at 1731785270187Closed at 1731785270187 2024-11-16T19:27:50,188 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d11ab77873cb,38077,1731785269930] 2024-11-16T19:27:50,189 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T19:27:50,189 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T19:27:50,190 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T19:27:50,193 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T19:27:50,195 INFO [RS:0;d11ab77873cb:38077 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T19:27:50,195 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T19:27:50,199 INFO [RS:0;d11ab77873cb:38077 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T19:27:50,200 INFO [RS:0;d11ab77873cb:38077 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T19:27:50,200 INFO [RS:0;d11ab77873cb:38077 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:50,201 INFO [RS:0;d11ab77873cb:38077 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T19:27:50,202 INFO [RS:0;d11ab77873cb:38077 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T19:27:50,202 INFO [RS:0;d11ab77873cb:38077 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
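[editor's note] The MemStoreFlusher line above reports a global memstore limit of 880 M with a low-water mark of 836 M, i.e. 95% of the limit. Assuming the usual heap-fraction sizing keys apply (the key names and the 0.4/0.95 values below are my assumption from general HBase configuration, not printed in this log), they could be tuned like this:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class MemStoreSizing {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Assumed key: fraction of the region server heap usable by all memstores.
      conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
      // Assumed key: low-water mark as a fraction of the limit; 0.95 matches 836 M / 880 M above.
      conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
      System.out.println(conf.get("hbase.regionserver.global.memstore.size"));
    }
  }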
2024-11-16T19:27:50,203 DEBUG [RS:0;d11ab77873cb:38077 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:50,203 DEBUG [RS:0;d11ab77873cb:38077 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:50,203 DEBUG [RS:0;d11ab77873cb:38077 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:50,203 DEBUG [RS:0;d11ab77873cb:38077 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:50,203 DEBUG [RS:0;d11ab77873cb:38077 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:50,203 DEBUG [RS:0;d11ab77873cb:38077 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d11ab77873cb:0, corePoolSize=2, maxPoolSize=2 2024-11-16T19:27:50,203 DEBUG [RS:0;d11ab77873cb:38077 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:50,203 DEBUG [RS:0;d11ab77873cb:38077 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:50,203 DEBUG [RS:0;d11ab77873cb:38077 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:50,204 DEBUG [RS:0;d11ab77873cb:38077 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:50,204 DEBUG [RS:0;d11ab77873cb:38077 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:50,204 DEBUG [RS:0;d11ab77873cb:38077 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:50,204 DEBUG [RS:0;d11ab77873cb:38077 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d11ab77873cb:0, corePoolSize=3, maxPoolSize=3 2024-11-16T19:27:50,204 DEBUG [RS:0;d11ab77873cb:38077 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0, corePoolSize=3, maxPoolSize=3 2024-11-16T19:27:50,204 INFO [RS:0;d11ab77873cb:38077 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:50,205 INFO [RS:0;d11ab77873cb:38077 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:50,205 INFO [RS:0;d11ab77873cb:38077 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:50,205 INFO [RS:0;d11ab77873cb:38077 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
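[editor's note] Each "Chore ScheduledChore name=... is enabled" line corresponds to a periodic task registered with the server's ChoreService. A minimal sketch of that pattern, assuming the ScheduledChore/ChoreService/Stoppable classes accept the constructor and methods used below (they are server-internal classes, so treat this as an approximation); the chore name, period and body are invented for illustration:

  import java.util.concurrent.TimeUnit;
  import org.apache.hadoop.hbase.ChoreService;
  import org.apache.hadoop.hbase.ScheduledChore;
  import org.apache.hadoop.hbase.Stoppable;

  public class ChoreSketch {
    public static void main(String[] args) throws InterruptedException {
      // Simple stopper the chore can consult to know when to give up.
      Stoppable stopper = new Stoppable() {
        private volatile boolean stopped;
        @Override public void stop(String why) { stopped = true; }
        @Override public boolean isStopped() { return stopped; }
      };
      ChoreService service = new ChoreService("demo");
      // Runs every second, like the CompactionChecker chore logged above.
      ScheduledChore chore = new ScheduledChore("demoChore", stopper, 1000, 0, TimeUnit.MILLISECONDS) {
        @Override protected void chore() {
          System.out.println("periodic work");
        }
      };
      service.scheduleChore(chore);
      Thread.sleep(3000);
      service.shutdown();
    }
  }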
2024-11-16T19:27:50,205 INFO [RS:0;d11ab77873cb:38077 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:50,205 INFO [RS:0;d11ab77873cb:38077 {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,38077,1731785269930-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T19:27:50,220 INFO [RS:0;d11ab77873cb:38077 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T19:27:50,221 INFO [RS:0;d11ab77873cb:38077 {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,38077,1731785269930-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:50,221 INFO [RS:0;d11ab77873cb:38077 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:50,221 INFO [RS:0;d11ab77873cb:38077 {}] regionserver.Replication(171): d11ab77873cb,38077,1731785269930 started 2024-11-16T19:27:50,236 INFO [RS:0;d11ab77873cb:38077 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:50,236 INFO [RS:0;d11ab77873cb:38077 {}] regionserver.HRegionServer(1482): Serving as d11ab77873cb,38077,1731785269930, RpcServer on d11ab77873cb/172.17.0.2:38077, sessionid=0x1004a017edb0001 2024-11-16T19:27:50,236 DEBUG [RS:0;d11ab77873cb:38077 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T19:27:50,236 DEBUG [RS:0;d11ab77873cb:38077 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d11ab77873cb,38077,1731785269930 2024-11-16T19:27:50,236 DEBUG [RS:0;d11ab77873cb:38077 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd11ab77873cb,38077,1731785269930' 2024-11-16T19:27:50,236 DEBUG [RS:0;d11ab77873cb:38077 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T19:27:50,237 DEBUG [RS:0;d11ab77873cb:38077 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T19:27:50,238 DEBUG [RS:0;d11ab77873cb:38077 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T19:27:50,238 DEBUG [RS:0;d11ab77873cb:38077 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T19:27:50,238 DEBUG [RS:0;d11ab77873cb:38077 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d11ab77873cb,38077,1731785269930 2024-11-16T19:27:50,238 DEBUG [RS:0;d11ab77873cb:38077 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd11ab77873cb,38077,1731785269930' 2024-11-16T19:27:50,238 DEBUG [RS:0;d11ab77873cb:38077 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T19:27:50,238 DEBUG [RS:0;d11ab77873cb:38077 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T19:27:50,239 DEBUG [RS:0;d11ab77873cb:38077 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T19:27:50,239 INFO [RS:0;d11ab77873cb:38077 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T19:27:50,239 INFO [RS:0;d11ab77873cb:38077 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-16T19:27:50,345 INFO [RS:0;d11ab77873cb:38077 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d11ab77873cb%2C38077%2C1731785269930, suffix=, logDir=hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/WALs/d11ab77873cb,38077,1731785269930, archiveDir=hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/oldWALs, maxLogs=32 2024-11-16T19:27:50,346 WARN [d11ab77873cb:40407 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-16T19:27:50,353 INFO [RS:0;d11ab77873cb:38077 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C38077%2C1731785269930.1731785270352 2024-11-16T19:27:50,359 INFO [RS:0;d11ab77873cb:38077 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/WALs/d11ab77873cb,38077,1731785269930/d11ab77873cb%2C38077%2C1731785269930.1731785270352 2024-11-16T19:27:50,360 DEBUG [RS:0;d11ab77873cb:38077 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46393:46393),(127.0.0.1/127.0.0.1:41679:41679)] 2024-11-16T19:27:50,596 DEBUG [d11ab77873cb:40407 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T19:27:50,597 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d11ab77873cb,38077,1731785269930 2024-11-16T19:27:50,599 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d11ab77873cb,38077,1731785269930, state=OPENING 2024-11-16T19:27:50,600 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T19:27:50,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38077-0x1004a017edb0001, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:50,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:50,604 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T19:27:50,604 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:27:50,604 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:27:50,604 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=d11ab77873cb,38077,1731785269930}] 2024-11-16T19:27:50,758 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T19:27:50,761 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46099, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T19:27:50,765 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T19:27:50,765 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T19:27:50,768 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d11ab77873cb%2C38077%2C1731785269930.meta, suffix=.meta, logDir=hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/WALs/d11ab77873cb,38077,1731785269930, archiveDir=hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/oldWALs, maxLogs=32 2024-11-16T19:27:50,771 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C38077%2C1731785269930.meta.1731785270771.meta 2024-11-16T19:27:50,779 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/WALs/d11ab77873cb,38077,1731785269930/d11ab77873cb%2C38077%2C1731785269930.meta.1731785270771.meta 2024-11-16T19:27:50,781 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41679:41679),(127.0.0.1/127.0.0.1:46393:46393)] 2024-11-16T19:27:50,781 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T19:27:50,782 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T19:27:50,782 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T19:27:50,782 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
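[editor's note] The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" lines describe when this test's WALs roll, which is exactly what testLogRollOnDatanodeDeath exercises. Assuming the conventional property names (they are not printed in the log), the same knobs could be set as below; rollsize is typically blocksize times the roll multiplier (256 MB x 0.5 = 128 MB):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class WalRollingConfig {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Assumed keys: WAL block size on HDFS and the fraction of it at which a roll is requested.
      conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
      conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
      // Assumed key: how many WAL files may accumulate before flushes are forced (maxLogs=32 above).
      conf.setInt("hbase.regionserver.maxlogs", 32);
      System.out.println(conf.getInt("hbase.regionserver.maxlogs", -1));
    }
  }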
2024-11-16T19:27:50,782 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T19:27:50,782 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:27:50,783 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T19:27:50,783 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T19:27:50,785 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T19:27:50,786 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T19:27:50,786 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:50,787 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:27:50,787 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T19:27:50,788 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T19:27:50,788 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:50,789 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:27:50,789 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T19:27:50,790 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T19:27:50,790 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:50,791 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:27:50,791 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T19:27:50,792 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T19:27:50,792 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:50,792 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
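[editor's note] The CompactionConfiguration(183) lines repeat the per-family compaction tuning in effect: at least 3 and at most 10 files per minor compaction, a selection ratio of 1.2 (5.0 off-peak), and a one-week major compaction period. Assuming the standard property names (not shown in the log), those values map onto configuration roughly like this:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class CompactionTuning {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Assumed keys; values mirror the CompactionConfiguration output above.
      conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact
      conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
      conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
      conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
      conf.setLong("hbase.hregion.majorcompaction", 604800000L); // major period in ms (one week)
      System.out.println(conf.getFloat("hbase.hstore.compaction.ratio", -1f));
    }
  }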
2024-11-16T19:27:50,793 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T19:27:50,794 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/data/hbase/meta/1588230740 2024-11-16T19:27:50,795 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/data/hbase/meta/1588230740 2024-11-16T19:27:50,797 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T19:27:50,797 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T19:27:50,797 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T19:27:50,799 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T19:27:50,801 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=689842, jitterRate=-0.12282131612300873}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T19:27:50,801 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T19:27:50,802 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731785270783Writing region info on filesystem at 1731785270783Initializing all the Stores at 1731785270784 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785270784Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785270785 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785270785Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785270785Cleaning up temporary data from old regions at 1731785270797 (+12 ms)Running coprocessor post-open hooks at 1731785270801 (+4 ms)Region opened successfully at 1731785270802 (+1 ms) 2024-11-16T19:27:50,803 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731785270758 2024-11-16T19:27:50,807 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T19:27:50,807 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T19:27:50,808 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=d11ab77873cb,38077,1731785269930 2024-11-16T19:27:50,810 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d11ab77873cb,38077,1731785269930, state=OPEN 2024-11-16T19:27:50,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T19:27:50,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38077-0x1004a017edb0001, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T19:27:50,812 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d11ab77873cb,38077,1731785269930 2024-11-16T19:27:50,813 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:27:50,813 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:27:50,818 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T19:27:50,818 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d11ab77873cb,38077,1731785269930 in 209 msec 2024-11-16T19:27:50,821 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T19:27:50,821 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 628 msec 2024-11-16T19:27:50,823 DEBUG [PEWorker-1 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T19:27:50,824 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T19:27:50,826 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T19:27:50,826 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d11ab77873cb,38077,1731785269930, seqNum=-1] 2024-11-16T19:27:50,826 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T19:27:50,828 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40063, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T19:27:50,837 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 719 msec 2024-11-16T19:27:50,837 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731785270837, completionTime=-1 2024-11-16T19:27:50,837 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T19:27:50,837 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T19:27:50,840 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T19:27:50,840 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731785330840 2024-11-16T19:27:50,840 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731785390840 2024-11-16T19:27:50,840 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-16T19:27:50,841 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,40407,1731785269886-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:50,841 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,40407,1731785269886-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:50,841 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,40407,1731785269886-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:50,841 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d11ab77873cb:40407, period=300000, unit=MILLISECONDS is enabled. 
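[editor's note] The ConnectionUtils lines above show the internal client fetching the hbase:meta location ([region=hbase:meta,,1.1588230740, hostname=d11ab77873cb,38077,...]). From application code the equivalent lookup goes through the public RegionLocator API; a minimal sketch, with connection settings and output left generic:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.HRegionLocation;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.RegionLocator;

  public class MetaLocationLookup {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
        for (HRegionLocation loc : locator.getAllRegionLocations()) {
          // For a single-region meta this prints one location: region name -> host,port,startcode.
          System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
        }
      }
    }
  }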
2024-11-16T19:27:50,841 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:50,841 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:50,844 DEBUG [master/d11ab77873cb:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T19:27:50,847 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.882sec 2024-11-16T19:27:50,847 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T19:27:50,848 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T19:27:50,848 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T19:27:50,848 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T19:27:50,848 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T19:27:50,848 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,40407,1731785269886-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T19:27:50,848 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,40407,1731785269886-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T19:27:50,851 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T19:27:50,851 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T19:27:50,851 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,40407,1731785269886-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
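[editor's note] Once "Master has completed initialization" appears, the cluster is queryable from a client; the master and region-server counts summarized earlier in this log are exposed through Admin#getClusterMetrics. A small sketch (what it prints depends on the running cluster):

  import org.apache.hadoop.hbase.ClusterMetrics;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class ClusterSummary {
    public static void main(String[] args) throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Admin admin = conn.getAdmin()) {
        ClusterMetrics metrics = admin.getClusterMetrics();
        System.out.println("active master: " + metrics.getMasterName());
        System.out.println("live region servers: " + metrics.getLiveServerMetrics().size());
        System.out.println("regions in transition: " + metrics.getRegionStatesInTransition().size());
      }
    }
  }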
2024-11-16T19:27:50,947 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e5835fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T19:27:50,947 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d11ab77873cb,40407,-1 for getting cluster id 2024-11-16T19:27:50,948 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T19:27:50,951 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8404e94a-b877-4e19-97ae-03f1bde7564a' 2024-11-16T19:27:50,951 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T19:27:50,952 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8404e94a-b877-4e19-97ae-03f1bde7564a" 2024-11-16T19:27:50,952 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@564619b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T19:27:50,953 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d11ab77873cb,40407,-1] 2024-11-16T19:27:50,953 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T19:27:50,956 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:27:50,958 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60718, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T19:27:50,960 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@205bfd42, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T19:27:50,960 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T19:27:50,962 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d11ab77873cb,38077,1731785269930, seqNum=-1] 2024-11-16T19:27:50,962 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T19:27:50,965 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41482, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T19:27:50,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=d11ab77873cb,40407,1731785269886 2024-11-16T19:27:50,969 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:27:50,973 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T19:27:50,973 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T19:27:50,973 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T19:27:50,973 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T19:27:50,973 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:27:50,973 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:27:50,973 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T19:27:50,974 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T19:27:50,974 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1712408714, stopped=false 2024-11-16T19:27:50,974 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d11ab77873cb,40407,1731785269886 2024-11-16T19:27:50,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38077-0x1004a017edb0001, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T19:27:50,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38077-0x1004a017edb0001, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:50,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T19:27:50,975 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T19:27:50,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:50,975 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T19:27:50,976 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T19:27:50,976 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:27:50,976 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd11ab77873cb,38077,1731785269930' ***** 2024-11-16T19:27:50,976 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T19:27:50,977 INFO [RS:0;d11ab77873cb:38077 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T19:27:50,977 INFO [RS:0;d11ab77873cb:38077 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T19:27:50,977 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T19:27:50,977 INFO [RS:0;d11ab77873cb:38077 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T19:27:50,977 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:27:50,977 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38077-0x1004a017edb0001, quorum=127.0.0.1:57302, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:27:50,977 INFO [RS:0;d11ab77873cb:38077 {}] regionserver.HRegionServer(959): stopping server d11ab77873cb,38077,1731785269930 2024-11-16T19:27:50,977 INFO [RS:0;d11ab77873cb:38077 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T19:27:50,977 INFO [RS:0;d11ab77873cb:38077 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d11ab77873cb:38077. 2024-11-16T19:27:50,977 DEBUG [RS:0;d11ab77873cb:38077 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T19:27:50,977 DEBUG [RS:0;d11ab77873cb:38077 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:27:50,977 INFO [RS:0;d11ab77873cb:38077 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-16T19:27:50,977 INFO [RS:0;d11ab77873cb:38077 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T19:27:50,977 INFO [RS:0;d11ab77873cb:38077 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T19:27:50,978 INFO [RS:0;d11ab77873cb:38077 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T19:27:50,978 INFO [RS:0;d11ab77873cb:38077 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-16T19:27:50,978 DEBUG [RS:0;d11ab77873cb:38077 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-16T19:27:50,978 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T19:27:50,978 DEBUG [RS:0;d11ab77873cb:38077 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-16T19:27:50,979 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T19:27:50,979 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T19:27:50,979 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T19:27:50,979 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T19:27:50,979 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-16T19:27:50,998 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/data/hbase/meta/1588230740/.tmp/ns/fab5cf220a7c434da9db345e0e7664c5 is 43, key is default/ns:d/1731785270829/Put/seqid=0 2024-11-16T19:27:51,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35145 is added to blk_1073741835_1011 (size=5153) 2024-11-16T19:27:51,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42519 is added to blk_1073741835_1011 (size=5153) 2024-11-16T19:27:51,008 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/data/hbase/meta/1588230740/.tmp/ns/fab5cf220a7c434da9db345e0e7664c5 2024-11-16T19:27:51,018 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/data/hbase/meta/1588230740/.tmp/ns/fab5cf220a7c434da9db345e0e7664c5 as hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/data/hbase/meta/1588230740/ns/fab5cf220a7c434da9db345e0e7664c5 2024-11-16T19:27:51,030 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/data/hbase/meta/1588230740/ns/fab5cf220a7c434da9db345e0e7664c5, entries=2, sequenceid=6, filesize=5.0 K 2024-11-16T19:27:51,031 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 52ms, sequenceid=6, compaction requested=false 2024-11-16T19:27:51,031 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T19:27:51,038 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T19:27:51,039 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T19:27:51,039 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T19:27:51,039 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731785270978Running coprocessor pre-close hooks at 1731785270978Disabling compacts and flushes for region at 1731785270978Disabling writes for close at 1731785270979 (+1 ms)Obtaining lock to block concurrent updates at 1731785270979Preparing flush snapshotting stores in 1588230740 at 1731785270979Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731785270979Flushing stores of hbase:meta,,1.1588230740 at 1731785270980 (+1 ms)Flushing 1588230740/ns: creating writer at 1731785270980Flushing 1588230740/ns: appending metadata at 1731785270998 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1731785270998Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16d487ed: reopening flushed file at 1731785271016 (+18 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 52ms, sequenceid=6, compaction requested=false at 1731785271031 (+15 ms)Writing region close event to WAL at 1731785271033 (+2 ms)Running coprocessor post-close hooks at 1731785271039 (+6 ms)Closed at 1731785271039 2024-11-16T19:27:51,039 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T19:27:51,179 INFO [RS:0;d11ab77873cb:38077 {}] regionserver.HRegionServer(976): stopping server d11ab77873cb,38077,1731785269930; all regions closed. 
2024-11-16T19:27:51,179 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:51,180 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:51,180 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:51,180 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:51,180 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:51,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35145 is added to blk_1073741834_1010 (size=1152) 2024-11-16T19:27:51,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42519 is added to blk_1073741834_1010 (size=1152) 2024-11-16T19:27:51,186 DEBUG [RS:0;d11ab77873cb:38077 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/oldWALs 2024-11-16T19:27:51,186 INFO [RS:0;d11ab77873cb:38077 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d11ab77873cb%2C38077%2C1731785269930.meta:.meta(num 1731785270771) 2024-11-16T19:27:51,186 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:51,186 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:51,187 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:51,187 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:51,187 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:51,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42519 is added to blk_1073741833_1009 (size=93) 2024-11-16T19:27:51,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35145 is added to blk_1073741833_1009 (size=93) 2024-11-16T19:27:51,192 DEBUG [RS:0;d11ab77873cb:38077 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/oldWALs 2024-11-16T19:27:51,193 INFO [RS:0;d11ab77873cb:38077 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d11ab77873cb%2C38077%2C1731785269930:(num 1731785270352) 2024-11-16T19:27:51,193 DEBUG [RS:0;d11ab77873cb:38077 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:27:51,193 INFO [RS:0;d11ab77873cb:38077 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T19:27:51,193 INFO [RS:0;d11ab77873cb:38077 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T19:27:51,193 INFO [RS:0;d11ab77873cb:38077 {}] hbase.ChoreService(370): Chore service for: regionserver/d11ab77873cb:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T19:27:51,193 INFO [RS:0;d11ab77873cb:38077 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T19:27:51,193 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T19:27:51,193 INFO [RS:0;d11ab77873cb:38077 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38077 2024-11-16T19:27:51,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38077-0x1004a017edb0001, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d11ab77873cb,38077,1731785269930 2024-11-16T19:27:51,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T19:27:51,195 INFO [RS:0;d11ab77873cb:38077 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T19:27:51,196 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d11ab77873cb,38077,1731785269930] 2024-11-16T19:27:51,197 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d11ab77873cb,38077,1731785269930 already deleted, retry=false 2024-11-16T19:27:51,197 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d11ab77873cb,38077,1731785269930 expired; onlineServers=0 2024-11-16T19:27:51,197 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd11ab77873cb,40407,1731785269886' ***** 2024-11-16T19:27:51,197 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T19:27:51,197 INFO [M:0;d11ab77873cb:40407 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T19:27:51,198 INFO [M:0;d11ab77873cb:40407 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T19:27:51,198 DEBUG [M:0;d11ab77873cb:40407 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T19:27:51,198 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T19:27:51,198 DEBUG [M:0;d11ab77873cb:40407 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T19:27:51,198 DEBUG [master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.large.0-1731785270130 {}] cleaner.HFileCleaner(306): Exit Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.large.0-1731785270130,5,FailOnTimeoutGroup] 2024-11-16T19:27:51,198 DEBUG [master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.small.0-1731785270130 {}] cleaner.HFileCleaner(306): Exit Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.small.0-1731785270130,5,FailOnTimeoutGroup] 2024-11-16T19:27:51,198 INFO [M:0;d11ab77873cb:40407 {}] hbase.ChoreService(370): Chore service for: master/d11ab77873cb:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T19:27:51,198 INFO [M:0;d11ab77873cb:40407 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T19:27:51,198 DEBUG [M:0;d11ab77873cb:40407 {}] master.HMaster(1795): Stopping service threads 2024-11-16T19:27:51,198 INFO [M:0;d11ab77873cb:40407 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T19:27:51,198 INFO [M:0;d11ab77873cb:40407 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T19:27:51,199 INFO [M:0;d11ab77873cb:40407 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T19:27:51,199 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T19:27:51,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T19:27:51,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:51,200 DEBUG [M:0;d11ab77873cb:40407 {}] zookeeper.ZKUtil(347): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T19:27:51,200 WARN [M:0;d11ab77873cb:40407 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T19:27:51,200 INFO [M:0;d11ab77873cb:40407 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/.lastflushedseqids 2024-11-16T19:27:51,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42519 is added to blk_1073741836_1012 (size=99) 2024-11-16T19:27:51,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35145 is added to blk_1073741836_1012 (size=99) 2024-11-16T19:27:51,217 INFO [M:0;d11ab77873cb:40407 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T19:27:51,217 INFO [M:0;d11ab77873cb:40407 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T19:27:51,217 DEBUG [M:0;d11ab77873cb:40407 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T19:27:51,217 INFO [M:0;d11ab77873cb:40407 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:27:51,217 DEBUG [M:0;d11ab77873cb:40407 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:27:51,217 DEBUG [M:0;d11ab77873cb:40407 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T19:27:51,217 DEBUG [M:0;d11ab77873cb:40407 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:27:51,218 INFO [M:0;d11ab77873cb:40407 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-16T19:27:51,244 DEBUG [M:0;d11ab77873cb:40407 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/337770077fe242cda3089cdad24ed984 is 82, key is hbase:meta,,1/info:regioninfo/1731785270808/Put/seqid=0 2024-11-16T19:27:51,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35145 is added to blk_1073741837_1013 (size=5672) 2024-11-16T19:27:51,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42519 is added to blk_1073741837_1013 (size=5672) 2024-11-16T19:27:51,254 INFO [M:0;d11ab77873cb:40407 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/337770077fe242cda3089cdad24ed984 2024-11-16T19:27:51,283 DEBUG [M:0;d11ab77873cb:40407 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9d0c429a78a643828561ffe8e366c2bb is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731785270836/Put/seqid=0 2024-11-16T19:27:51,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42519 is added to blk_1073741838_1014 (size=5275) 2024-11-16T19:27:51,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35145 is added to blk_1073741838_1014 (size=5275) 2024-11-16T19:27:51,293 INFO [M:0;d11ab77873cb:40407 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9d0c429a78a643828561ffe8e366c2bb 2024-11-16T19:27:51,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38077-0x1004a017edb0001, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:27:51,296 INFO [RS:0;d11ab77873cb:38077 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T19:27:51,296 INFO [RS:0;d11ab77873cb:38077 {}] regionserver.HRegionServer(1031): Exiting; 
stopping=d11ab77873cb,38077,1731785269930; zookeeper connection closed. 2024-11-16T19:27:51,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38077-0x1004a017edb0001, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:27:51,297 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@594d302c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@594d302c 2024-11-16T19:27:51,297 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T19:27:51,317 DEBUG [M:0;d11ab77873cb:40407 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8e93727dda7043dc8e4325e338908b4a is 69, key is d11ab77873cb,38077,1731785269930/rs:state/1731785270180/Put/seqid=0 2024-11-16T19:27:51,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35145 is added to blk_1073741839_1015 (size=5156) 2024-11-16T19:27:51,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42519 is added to blk_1073741839_1015 (size=5156) 2024-11-16T19:27:51,323 INFO [M:0;d11ab77873cb:40407 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8e93727dda7043dc8e4325e338908b4a 2024-11-16T19:27:51,353 DEBUG [M:0;d11ab77873cb:40407 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/dd2a2de4a7fd48a1858ac6c786847c78 is 52, key is load_balancer_on/state:d/1731785270971/Put/seqid=0 2024-11-16T19:27:51,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42519 is added to blk_1073741840_1016 (size=5056) 2024-11-16T19:27:51,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35145 is added to blk_1073741840_1016 (size=5056) 2024-11-16T19:27:51,362 INFO [M:0;d11ab77873cb:40407 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/dd2a2de4a7fd48a1858ac6c786847c78 2024-11-16T19:27:51,370 DEBUG [M:0;d11ab77873cb:40407 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/337770077fe242cda3089cdad24ed984 as hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/337770077fe242cda3089cdad24ed984 2024-11-16T19:27:51,377 INFO [M:0;d11ab77873cb:40407 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/337770077fe242cda3089cdad24ed984, entries=8, sequenceid=29, filesize=5.5 K 2024-11-16T19:27:51,378 DEBUG [M:0;d11ab77873cb:40407 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9d0c429a78a643828561ffe8e366c2bb as hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9d0c429a78a643828561ffe8e366c2bb 2024-11-16T19:27:51,385 INFO [M:0;d11ab77873cb:40407 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9d0c429a78a643828561ffe8e366c2bb, entries=3, sequenceid=29, filesize=5.2 K 2024-11-16T19:27:51,387 DEBUG [M:0;d11ab77873cb:40407 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8e93727dda7043dc8e4325e338908b4a as hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8e93727dda7043dc8e4325e338908b4a 2024-11-16T19:27:51,394 INFO [M:0;d11ab77873cb:40407 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8e93727dda7043dc8e4325e338908b4a, entries=1, sequenceid=29, filesize=5.0 K 2024-11-16T19:27:51,395 DEBUG [M:0;d11ab77873cb:40407 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/dd2a2de4a7fd48a1858ac6c786847c78 as hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/dd2a2de4a7fd48a1858ac6c786847c78 2024-11-16T19:27:51,402 INFO [M:0;d11ab77873cb:40407 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36261/user/jenkins/test-data/e13b03f5-ebc7-6ab4-23f3-c5afd9187e26/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/dd2a2de4a7fd48a1858ac6c786847c78, entries=1, sequenceid=29, filesize=4.9 K 2024-11-16T19:27:51,404 INFO [M:0;d11ab77873cb:40407 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 187ms, sequenceid=29, compaction requested=false 2024-11-16T19:27:51,406 INFO [M:0;d11ab77873cb:40407 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T19:27:51,406 DEBUG [M:0;d11ab77873cb:40407 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731785271217Disabling compacts and flushes for region at 1731785271217Disabling writes for close at 1731785271217Obtaining lock to block concurrent updates at 1731785271218 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731785271218Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731785271218Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731785271220 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731785271220Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731785271244 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731785271244Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731785271262 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731785271282 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731785271283 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731785271300 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731785271316 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731785271316Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731785271332 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731785271352 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731785271352Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5868cc95: reopening flushed file at 1731785271368 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a02eb86: reopening flushed file at 1731785271377 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d96c7fc: reopening flushed file at 1731785271386 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e481b84: reopening flushed file at 1731785271394 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 187ms, sequenceid=29, compaction requested=false at 1731785271404 (+10 ms)Writing region close event to WAL at 1731785271405 (+1 ms)Closed at 1731785271405 2024-11-16T19:27:51,406 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:51,406 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:51,406 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:51,406 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:51,407 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:27:51,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42519 is added to blk_1073741830_1006 (size=10311) 2024-11-16T19:27:51,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35145 is added to blk_1073741830_1006 (size=10311) 2024-11-16T19:27:51,414 INFO [M:0;d11ab77873cb:40407 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-16T19:27:51,414 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T19:27:51,414 INFO [M:0;d11ab77873cb:40407 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40407 2024-11-16T19:27:51,414 INFO [M:0;d11ab77873cb:40407 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T19:27:51,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:27:51,517 INFO [M:0;d11ab77873cb:40407 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T19:27:51,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40407-0x1004a017edb0000, quorum=127.0.0.1:57302, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:27:51,522 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@c708570{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:27:51,523 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7ab06e68{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:27:51,523 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:27:51,523 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d944f53{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:27:51,523 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@463a48f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/hadoop.log.dir/,STOPPED} 2024-11-16T19:27:51,525 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T19:27:51,525 WARN [BP-1042754415-172.17.0.2-1731785269281 heartbeating to localhost/127.0.0.1:36261 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T19:27:51,525 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T19:27:51,525 WARN [BP-1042754415-172.17.0.2-1731785269281 heartbeating to localhost/127.0.0.1:36261 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1042754415-172.17.0.2-1731785269281 (Datanode Uuid b870fa3e-51be-4adb-b923-90e3c402411e) service to localhost/127.0.0.1:36261 2024-11-16T19:27:51,525 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/cluster_b5a9424a-2b64-4c46-3399-927841de40ac/data/data3/current/BP-1042754415-172.17.0.2-1731785269281 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:27:51,526 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/cluster_b5a9424a-2b64-4c46-3399-927841de40ac/data/data4/current/BP-1042754415-172.17.0.2-1731785269281 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:27:51,526 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T19:27:51,528 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30a1c2a3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:27:51,528 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4620cd8a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:27:51,528 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:27:51,528 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@607b9bc6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:27:51,529 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55f7876e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/hadoop.log.dir/,STOPPED} 2024-11-16T19:27:51,530 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T19:27:51,530 WARN [BP-1042754415-172.17.0.2-1731785269281 heartbeating to localhost/127.0.0.1:36261 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T19:27:51,530 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T19:27:51,530 WARN [BP-1042754415-172.17.0.2-1731785269281 heartbeating to localhost/127.0.0.1:36261 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1042754415-172.17.0.2-1731785269281 (Datanode Uuid f6365867-f6d9-48b6-ab57-6985e0eb94ba) service to localhost/127.0.0.1:36261 2024-11-16T19:27:51,531 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/cluster_b5a9424a-2b64-4c46-3399-927841de40ac/data/data1/current/BP-1042754415-172.17.0.2-1731785269281 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:27:51,531 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/cluster_b5a9424a-2b64-4c46-3399-927841de40ac/data/data2/current/BP-1042754415-172.17.0.2-1731785269281 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:27:51,531 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T19:27:51,536 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@52b0c086{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T19:27:51,537 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4d9de743{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:27:51,537 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:27:51,538 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a3c3ceb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:27:51,538 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bd9c5b4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/hadoop.log.dir/,STOPPED} 2024-11-16T19:27:51,545 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T19:27:51,567 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T19:27:51,567 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T19:27:51,567 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/hadoop.log.dir so I do NOT create it in target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3 2024-11-16T19:27:51,567 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dffd129d-b722-71c0-fbb6-114fb2f76c90/hadoop.tmp.dir so I do NOT create it in target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3 2024-11-16T19:27:51,567 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8, deleteOnExit=true 2024-11-16T19:27:51,567 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T19:27:51,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/test.cache.data in system properties and HBase conf 2024-11-16T19:27:51,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T19:27:51,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/hadoop.log.dir in system properties and HBase conf 2024-11-16T19:27:51,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T19:27:51,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T19:27:51,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T19:27:51,568 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T19:27:51,569 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T19:27:51,569 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T19:27:51,569 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T19:27:51,569 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T19:27:51,569 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T19:27:51,569 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T19:27:51,569 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T19:27:51,570 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T19:27:51,570 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T19:27:51,570 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/nfs.dump.dir in system properties and HBase conf 2024-11-16T19:27:51,570 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/java.io.tmpdir in system properties and HBase conf 2024-11-16T19:27:51,570 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T19:27:51,570 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T19:27:51,570 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T19:27:51,586 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T19:27:51,648 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:27:51,657 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:27:51,661 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:27:51,661 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:27:51,661 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T19:27:51,662 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:27:51,663 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16369da1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:27:51,663 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e7025d4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:27:51,757 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7cf515b1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/java.io.tmpdir/jetty-localhost-43663-hadoop-hdfs-3_4_1-tests_jar-_-any-13645973182068897577/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T19:27:51,758 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@c6abea1{HTTP/1.1, (http/1.1)}{localhost:43663} 2024-11-16T19:27:51,758 INFO [Time-limited test {}] server.Server(415): Started @98412ms 2024-11-16T19:27:51,769 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T19:27:51,819 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:27:51,822 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:27:51,823 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:27:51,823 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:27:51,823 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T19:27:51,824 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35a03ba9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:27:51,824 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bb4f47b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:27:51,921 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f1f9cf1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/java.io.tmpdir/jetty-localhost-41605-hadoop-hdfs-3_4_1-tests_jar-_-any-17295602740806516928/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:27:51,921 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2df55a{HTTP/1.1, (http/1.1)}{localhost:41605} 2024-11-16T19:27:51,922 INFO [Time-limited test {}] server.Server(415): Started @98576ms 2024-11-16T19:27:51,923 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T19:27:51,954 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:27:51,958 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:27:51,958 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:27:51,958 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:27:51,958 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T19:27:51,960 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d3d4ef0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:27:51,960 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39a69c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:27:51,989 WARN [Thread-650 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data1/current/BP-1424824963-172.17.0.2-1731785271598/current, will proceed with Du for space computation calculation, 2024-11-16T19:27:51,990 WARN [Thread-651 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data2/current/BP-1424824963-172.17.0.2-1731785271598/current, will proceed with Du for space computation calculation, 2024-11-16T19:27:52,011 WARN [Thread-629 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T19:27:52,014 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8e84a9bca5c88501 with lease ID 0xf34050bba09898e7: Processing first storage report for DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9 from datanode DatanodeRegistration(127.0.0.1:46515, datanodeUuid=c845b32f-7331-4469-af57-91259711f9b4, infoPort=33293, infoSecurePort=0, ipcPort=33759, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598) 2024-11-16T19:27:52,014 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8e84a9bca5c88501 with lease ID 0xf34050bba09898e7: from storage DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9 node DatanodeRegistration(127.0.0.1:46515, datanodeUuid=c845b32f-7331-4469-af57-91259711f9b4, infoPort=33293, infoSecurePort=0, ipcPort=33759, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:27:52,014 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8e84a9bca5c88501 with lease ID 0xf34050bba09898e7: Processing first storage report for DS-4e455a72-37db-49e0-9cc8-425d68f49601 from datanode DatanodeRegistration(127.0.0.1:46515, datanodeUuid=c845b32f-7331-4469-af57-91259711f9b4, infoPort=33293, infoSecurePort=0, ipcPort=33759, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598) 2024-11-16T19:27:52,014 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8e84a9bca5c88501 with lease ID 0xf34050bba09898e7: from storage DS-4e455a72-37db-49e0-9cc8-425d68f49601 node DatanodeRegistration(127.0.0.1:46515, datanodeUuid=c845b32f-7331-4469-af57-91259711f9b4, infoPort=33293, infoSecurePort=0, ipcPort=33759, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:27:52,068 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@52be898{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/java.io.tmpdir/jetty-localhost-45747-hadoop-hdfs-3_4_1-tests_jar-_-any-16623301355068336885/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:27:52,068 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@520d17ab{HTTP/1.1, (http/1.1)}{localhost:45747} 2024-11-16T19:27:52,069 INFO [Time-limited test {}] server.Server(415): Started @98723ms 2024-11-16T19:27:52,070 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
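[Editor's sketch] The entries above show the test harness redirecting every Hadoop/HBase data directory into a per-test scratch folder and then bringing up an embedded HDFS (a NameNode web app plus two DataNodes, each sending two storage reports). Purely as a hedged illustration, and assuming the branch-3 HBaseTestingUtil keeps the method names of the long-standing HBaseTestingUtility (this is not the test's own source), a test would reach this state roughly like so:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniDfsBootstrapSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        Configuration conf = util.getConfiguration();
        // Keep the DataNode directory-scanner throttle at or below 1000 ms/sec
        // to avoid the "set to value above 1000 ms/sec" warning seen in the log.
        conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 1000);
        // Two DataNodes, matching the two block/storage reports above.
        util.startMiniDFSCluster(2);
        try {
          // ... run assertions against util.getDFSCluster() here ...
        } finally {
          util.shutdownMiniDFSCluster();
        }
      }
    }
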
2024-11-16T19:27:52,144 WARN [Thread-676 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data3/current/BP-1424824963-172.17.0.2-1731785271598/current, will proceed with Du for space computation calculation, 2024-11-16T19:27:52,145 WARN [Thread-677 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data4/current/BP-1424824963-172.17.0.2-1731785271598/current, will proceed with Du for space computation calculation, 2024-11-16T19:27:52,171 WARN [Thread-665 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T19:27:52,174 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd20497c1578a4dbd with lease ID 0xf34050bba09898e8: Processing first storage report for DS-e9ee80de-25e2-4782-abd9-24b5fa243d58 from datanode DatanodeRegistration(127.0.0.1:44959, datanodeUuid=b65ae952-8681-4d98-9cea-83182561fe12, infoPort=44625, infoSecurePort=0, ipcPort=34931, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598) 2024-11-16T19:27:52,174 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd20497c1578a4dbd with lease ID 0xf34050bba09898e8: from storage DS-e9ee80de-25e2-4782-abd9-24b5fa243d58 node DatanodeRegistration(127.0.0.1:44959, datanodeUuid=b65ae952-8681-4d98-9cea-83182561fe12, infoPort=44625, infoSecurePort=0, ipcPort=34931, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:27:52,174 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd20497c1578a4dbd with lease ID 0xf34050bba09898e8: Processing first storage report for DS-837ddefb-7f5a-4437-8ee1-23cf6bb243b8 from datanode DatanodeRegistration(127.0.0.1:44959, datanodeUuid=b65ae952-8681-4d98-9cea-83182561fe12, infoPort=44625, infoSecurePort=0, ipcPort=34931, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598) 2024-11-16T19:27:52,174 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd20497c1578a4dbd with lease ID 0xf34050bba09898e8: from storage DS-837ddefb-7f5a-4437-8ee1-23cf6bb243b8 node DatanodeRegistration(127.0.0.1:44959, datanodeUuid=b65ae952-8681-4d98-9cea-83182561fe12, infoPort=44625, infoSecurePort=0, ipcPort=34931, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:27:52,205 INFO [regionserver/d11ab77873cb:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T19:27:52,214 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3 2024-11-16T19:27:52,217 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, 
dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/zookeeper_0, clientPort=59815, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T19:27:52,218 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59815 2024-11-16T19:27:52,219 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:27:52,220 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:27:52,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44959 is added to blk_1073741825_1001 (size=7) 2024-11-16T19:27:52,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46515 is added to blk_1073741825_1001 (size=7) 2024-11-16T19:27:52,232 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4 with version=8 2024-11-16T19:27:52,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/hbase-staging 2024-11-16T19:27:52,236 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d11ab77873cb:0 server-side Connection retries=45 2024-11-16T19:27:52,236 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:27:52,236 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T19:27:52,236 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T19:27:52,236 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:27:52,236 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T19:27:52,236 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, 
hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T19:27:52,236 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T19:27:52,237 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42783 2024-11-16T19:27:52,240 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42783 connecting to ZooKeeper ensemble=127.0.0.1:59815 2024-11-16T19:27:52,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:427830x0, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T19:27:52,245 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42783-0x1004a0188070000 connected 2024-11-16T19:27:52,267 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:27:52,269 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:27:52,272 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:27:52,273 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4, hbase.cluster.distributed=false 2024-11-16T19:27:52,274 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T19:27:52,275 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42783 2024-11-16T19:27:52,275 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42783 2024-11-16T19:27:52,277 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42783 2024-11-16T19:27:52,280 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42783 2024-11-16T19:27:52,281 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42783 2024-11-16T19:27:52,299 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d11ab77873cb:0 server-side Connection retries=45 2024-11-16T19:27:52,299 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:27:52,299 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T19:27:52,299 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 
readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T19:27:52,299 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:27:52,299 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T19:27:52,299 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T19:27:52,299 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T19:27:52,300 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36045 2024-11-16T19:27:52,302 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36045 connecting to ZooKeeper ensemble=127.0.0.1:59815 2024-11-16T19:27:52,302 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:27:52,304 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:27:52,309 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:360450x0, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T19:27:52,309 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36045-0x1004a0188070001 connected 2024-11-16T19:27:52,310 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36045-0x1004a0188070001, quorum=127.0.0.1:59815, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:27:52,310 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T19:27:52,311 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T19:27:52,312 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36045-0x1004a0188070001, quorum=127.0.0.1:59815, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T19:27:52,313 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36045-0x1004a0188070001, quorum=127.0.0.1:59815, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T19:27:52,317 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36045 2024-11-16T19:27:52,318 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36045 2024-11-16T19:27:52,318 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36045 2024-11-16T19:27:52,319 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36045 2024-11-16T19:27:52,321 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36045 2024-11-16T19:27:52,336 DEBUG [M:0;d11ab77873cb:42783 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d11ab77873cb:42783 2024-11-16T19:27:52,336 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d11ab77873cb,42783,1731785272235 2024-11-16T19:27:52,338 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36045-0x1004a0188070001, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:27:52,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:27:52,339 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d11ab77873cb,42783,1731785272235 2024-11-16T19:27:52,340 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36045-0x1004a0188070001, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T19:27:52,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:52,340 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36045-0x1004a0188070001, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:52,342 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T19:27:52,342 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d11ab77873cb,42783,1731785272235 from backup master directory 2024-11-16T19:27:52,343 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36045-0x1004a0188070001, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:27:52,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d11ab77873cb,42783,1731785272235 2024-11-16T19:27:52,343 WARN [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
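[Editor's sketch] The preceding entries show the master (bound to 172.17.0.2:42783) and a regionserver (36045) registering with the MiniZooKeeperCluster at 127.0.0.1:59815 under baseZNode=/hbase. As a hedged sketch only (the quorum address, client port and znode parent are specific to this run, and the code uses the standard HBase client ConnectionFactory rather than anything from the test itself), a client would point at that ensemble like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MiniClusterClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Values taken from the log above; they change on every test run.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "59815");
        conf.set("zookeeper.znode.parent", "/hbase");
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
          // Admin/Table handles would be obtained from 'connection' here.
        }
      }
    }
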
2024-11-16T19:27:52,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:27:52,343 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d11ab77873cb,42783,1731785272235 2024-11-16T19:27:52,351 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/hbase.id] with ID: d20320a4-660f-453c-b387-7fbffc8cf7f6 2024-11-16T19:27:52,351 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/.tmp/hbase.id 2024-11-16T19:27:52,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44959 is added to blk_1073741826_1002 (size=42) 2024-11-16T19:27:52,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46515 is added to blk_1073741826_1002 (size=42) 2024-11-16T19:27:52,362 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/.tmp/hbase.id]:[hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/hbase.id] 2024-11-16T19:27:52,385 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:27:52,385 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T19:27:52,387 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
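[Editor's sketch] FSUtils above writes the new cluster ID (d20320a4-660f-453c-b387-7fbffc8cf7f6) to a .tmp location first and only then moves it to hbase.id, so a crash can never leave a half-written ID file behind. The same write-then-rename pattern, sketched with the plain Hadoop FileSystem API (illustrative paths; this is not the HBase FSUtils code):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteThenRenameSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id"); // illustrative paths
        Path dst = new Path("/user/jenkins/test-data/hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("d20320a4-660f-453c-b387-7fbffc8cf7f6".getBytes(StandardCharsets.UTF_8));
        }
        // On HDFS the rename within one namespace is atomic, so readers see
        // either no hbase.id at all or a complete one.
        if (!fs.rename(tmp, dst)) {
          throw new IOException("could not move " + tmp + " to " + dst);
        }
      }
    }
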
2024-11-16T19:27:52,389 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36045-0x1004a0188070001, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:52,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:52,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46515 is added to blk_1073741827_1003 (size=196) 2024-11-16T19:27:52,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44959 is added to blk_1073741827_1003 (size=196) 2024-11-16T19:27:52,401 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T19:27:52,402 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T19:27:52,403 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T19:27:52,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46515 is added to blk_1073741828_1004 (size=1189) 2024-11-16T19:27:52,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44959 is added to blk_1073741828_1004 (size=1189) 2024-11-16T19:27:52,422 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store 2024-11-16T19:27:52,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44959 is added to blk_1073741829_1005 (size=34) 2024-11-16T19:27:52,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46515 is added to blk_1073741829_1005 (size=34) 2024-11-16T19:27:52,436 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:27:52,436 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T19:27:52,436 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:27:52,436 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:27:52,436 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T19:27:52,437 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:27:52,437 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
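[Editor's sketch] The master:store descriptor dumped above can be read directly off the log: the 'info' family keeps 3 versions, uses ROW_INDEX_V1 data block encoding, a ROWCOL Bloom filter, an 8 KB block size and is held in memory. A hedged reconstruction of just that family with the public descriptor builders (not the actual MasterRegion code) would look like:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the 'info' family attributes printed in the log above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .build();
        TableDescriptor store = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .build();
        System.out.println(store);
      }
    }
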
2024-11-16T19:27:52,437 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731785272436Disabling compacts and flushes for region at 1731785272436Disabling writes for close at 1731785272437 (+1 ms)Writing region close event to WAL at 1731785272437Closed at 1731785272437 2024-11-16T19:27:52,439 WARN [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/.initializing 2024-11-16T19:27:52,439 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/WALs/d11ab77873cb,42783,1731785272235 2024-11-16T19:27:52,443 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d11ab77873cb%2C42783%2C1731785272235, suffix=, logDir=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/WALs/d11ab77873cb,42783,1731785272235, archiveDir=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/oldWALs, maxLogs=10 2024-11-16T19:27:52,443 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C42783%2C1731785272235.1731785272443 2024-11-16T19:27:52,449 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/WALs/d11ab77873cb,42783,1731785272235/d11ab77873cb%2C42783%2C1731785272235.1731785272443 2024-11-16T19:27:52,454 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44625:44625),(127.0.0.1/127.0.0.1:33293:33293)] 2024-11-16T19:27:52,456 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T19:27:52,457 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:27:52,457 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:52,457 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:52,461 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:52,463 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T19:27:52,464 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:52,464 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:27:52,464 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:52,466 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T19:27:52,467 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:52,467 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:27:52,468 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:52,469 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T19:27:52,470 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:52,470 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:27:52,471 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:52,472 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T19:27:52,472 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:52,473 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:27:52,473 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:52,474 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:52,475 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:52,476 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:52,476 DEBUG [master/d11ab77873cb:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:52,477 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T19:27:52,478 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:27:52,484 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T19:27:52,485 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=819526, jitterRate=0.04208187758922577}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T19:27:52,487 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731785272457Initializing all the Stores at 1731785272458 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785272458Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785272461 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785272461Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785272461Cleaning up temporary data from old regions at 1731785272476 (+15 ms)Region opened successfully at 1731785272486 (+10 ms) 2024-11-16T19:27:52,487 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T19:27:52,493 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ef0ae2d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d11ab77873cb/172.17.0.2:0 2024-11-16T19:27:52,495 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T19:27:52,495 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T19:27:52,495 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T19:27:52,495 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T19:27:52,496 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T19:27:52,496 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T19:27:52,496 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T19:27:52,499 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T19:27:52,500 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T19:27:52,501 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T19:27:52,502 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T19:27:52,502 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T19:27:52,503 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T19:27:52,503 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T19:27:52,504 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T19:27:52,505 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T19:27:52,506 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T19:27:52,507 DEBUG 
[master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T19:27:52,510 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T19:27:52,511 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T19:27:52,513 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36045-0x1004a0188070001, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T19:27:52,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T19:27:52,513 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36045-0x1004a0188070001, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:52,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:52,514 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d11ab77873cb,42783,1731785272235, sessionid=0x1004a0188070000, setting cluster-up flag (Was=false) 2024-11-16T19:27:52,515 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36045-0x1004a0188070001, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:52,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:52,519 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T19:27:52,520 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d11ab77873cb,42783,1731785272235 2024-11-16T19:27:52,522 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36045-0x1004a0188070001, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:52,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:52,526 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T19:27:52,527 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d11ab77873cb,42783,1731785272235 2024-11-16T19:27:52,528 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T19:27:52,530 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T19:27:52,530 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T19:27:52,530 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T19:27:52,530 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d11ab77873cb,42783,1731785272235 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T19:27:52,532 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:27:52,532 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:27:52,532 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:27:52,532 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:27:52,532 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d11ab77873cb:0, corePoolSize=10, maxPoolSize=10 2024-11-16T19:27:52,532 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:52,532 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d11ab77873cb:0, corePoolSize=2, maxPoolSize=2 2024-11-16T19:27:52,532 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d11ab77873cb:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T19:27:52,536 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T19:27:52,537 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T19:27:52,537 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731785302537 2024-11-16T19:27:52,537 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T19:27:52,538 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T19:27:52,538 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T19:27:52,538 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T19:27:52,538 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T19:27:52,538 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T19:27:52,538 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:52,538 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T19:27:52,541 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:52,544 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T19:27:52,544 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T19:27:52,544 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T19:27:52,546 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T19:27:52,546 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T19:27:52,548 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.large.0-1731785272546,5,FailOnTimeoutGroup] 2024-11-16T19:27:52,548 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.small.0-1731785272548,5,FailOnTimeoutGroup] 2024-11-16T19:27:52,549 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:52,549 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T19:27:52,549 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:52,549 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
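The descriptor dumped above spells out the hbase:meta column families (info, ns, rep_barrier, table) with ROWCOL bloom filters, ROW_INDEX_V1 block encoding, in-memory caching and 8 KB blocks. hbase:meta itself is bootstrapped by InitMetaProcedure rather than through the client API, but a minimal sketch of expressing the same 'info'-style attributes for an ordinary user table with the public client builders could look like this (the table name "demo" is made up for illustration):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateInfoLikeTable {
  public static void main(String[] args) throws Exception {
    // 'demo' is a hypothetical user table; hbase:meta itself is created by the
    // master during bootstrap, not via Admin.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                     // VERSIONS => '3'
        .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
        .setInMemory(true)                                     // IN_MEMORY => 'true'
        .setBlocksize(8 * 1024)                                // BLOCKSIZE => 8 KB
        .build();

    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(info)
        .build();

    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(table);
    }
  }
}
```

Attributes not set on the builder fall back to the same defaults shown in the dump (TTL FOREVER, MIN_VERSIONS 0, no compression).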
2024-11-16T19:27:52,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44959 is added to blk_1073741831_1007 (size=1321) 2024-11-16T19:27:52,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46515 is added to blk_1073741831_1007 (size=1321) 2024-11-16T19:27:52,623 INFO [RS:0;d11ab77873cb:36045 {}] regionserver.HRegionServer(746): ClusterId : d20320a4-660f-453c-b387-7fbffc8cf7f6 2024-11-16T19:27:52,623 DEBUG [RS:0;d11ab77873cb:36045 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T19:27:52,625 DEBUG [RS:0;d11ab77873cb:36045 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T19:27:52,625 DEBUG [RS:0;d11ab77873cb:36045 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T19:27:52,627 DEBUG [RS:0;d11ab77873cb:36045 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T19:27:52,627 DEBUG [RS:0;d11ab77873cb:36045 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f9ddbc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d11ab77873cb/172.17.0.2:0 2024-11-16T19:27:52,645 DEBUG [RS:0;d11ab77873cb:36045 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d11ab77873cb:36045 2024-11-16T19:27:52,645 INFO [RS:0;d11ab77873cb:36045 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T19:27:52,645 INFO [RS:0;d11ab77873cb:36045 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T19:27:52,645 DEBUG [RS:0;d11ab77873cb:36045 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-16T19:27:52,646 INFO [RS:0;d11ab77873cb:36045 {}] regionserver.HRegionServer(2659): reportForDuty to master=d11ab77873cb,42783,1731785272235 with port=36045, startcode=1731785272299 2024-11-16T19:27:52,647 DEBUG [RS:0;d11ab77873cb:36045 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T19:27:52,649 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47287, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T19:27:52,649 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42783 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d11ab77873cb,36045,1731785272299 2024-11-16T19:27:52,649 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42783 {}] master.ServerManager(517): Registering regionserver=d11ab77873cb,36045,1731785272299 2024-11-16T19:27:52,652 DEBUG [RS:0;d11ab77873cb:36045 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4 2024-11-16T19:27:52,652 DEBUG [RS:0;d11ab77873cb:36045 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41599 2024-11-16T19:27:52,652 DEBUG [RS:0;d11ab77873cb:36045 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T19:27:52,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T19:27:52,654 DEBUG [RS:0;d11ab77873cb:36045 {}] zookeeper.ZKUtil(111): regionserver:36045-0x1004a0188070001, quorum=127.0.0.1:59815, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d11ab77873cb,36045,1731785272299 2024-11-16T19:27:52,654 WARN [RS:0;d11ab77873cb:36045 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T19:27:52,654 INFO [RS:0;d11ab77873cb:36045 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T19:27:52,654 DEBUG [RS:0;d11ab77873cb:36045 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299 2024-11-16T19:27:52,655 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d11ab77873cb,36045,1731785272299] 2024-11-16T19:27:52,659 INFO [RS:0;d11ab77873cb:36045 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T19:27:52,662 INFO [RS:0;d11ab77873cb:36045 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T19:27:52,663 INFO [RS:0;d11ab77873cb:36045 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T19:27:52,663 INFO [RS:0;d11ab77873cb:36045 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
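Registration above happens in two places at once: the region server creates an ephemeral znode under /hbase/rs, and the master's RegionServerTracker reacts to the resulting child event. A sketch of that pattern with the plain ZooKeeper client rather than HBase's ZKWatcher/RecoverableZooKeeper wrappers; the quorum address and /hbase/rs path are taken from this log, the server-name payload is hypothetical, and the parent znode is assumed to already exist:

```java
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralRegistrationSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59815", 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();

    // Region server side: advertise itself with an ephemeral znode that
    // disappears automatically if the session dies (assumes /hbase/rs exists).
    String path = zk.create("/hbase/rs/demo-host,36045,0",
        "startcode".getBytes(StandardCharsets.UTF_8),
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    System.out.println("registered " + path);

    // Master side: watch the children of /hbase/rs to learn about servers
    // joining or crashing (the NodeChildrenChanged events seen in the log).
    List<String> servers = zk.getChildren("/hbase/rs",
        event -> System.out.println("children changed: " + event.getPath()));
    System.out.println("live servers: " + servers);

    zk.close();
  }
}
```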
2024-11-16T19:27:52,665 INFO [RS:0;d11ab77873cb:36045 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T19:27:52,666 INFO [RS:0;d11ab77873cb:36045 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T19:27:52,666 INFO [RS:0;d11ab77873cb:36045 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:52,667 DEBUG [RS:0;d11ab77873cb:36045 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:52,667 DEBUG [RS:0;d11ab77873cb:36045 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:52,667 DEBUG [RS:0;d11ab77873cb:36045 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:52,667 DEBUG [RS:0;d11ab77873cb:36045 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:52,667 DEBUG [RS:0;d11ab77873cb:36045 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:52,667 DEBUG [RS:0;d11ab77873cb:36045 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d11ab77873cb:0, corePoolSize=2, maxPoolSize=2 2024-11-16T19:27:52,667 DEBUG [RS:0;d11ab77873cb:36045 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:52,667 DEBUG [RS:0;d11ab77873cb:36045 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:52,667 DEBUG [RS:0;d11ab77873cb:36045 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:52,667 DEBUG [RS:0;d11ab77873cb:36045 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:52,668 DEBUG [RS:0;d11ab77873cb:36045 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:52,668 DEBUG [RS:0;d11ab77873cb:36045 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:52,668 DEBUG [RS:0;d11ab77873cb:36045 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d11ab77873cb:0, corePoolSize=3, maxPoolSize=3 2024-11-16T19:27:52,668 DEBUG [RS:0;d11ab77873cb:36045 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0, corePoolSize=3, maxPoolSize=3 2024-11-16T19:27:52,669 INFO [RS:0;d11ab77873cb:36045 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
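The executor.ExecutorService lines above size one pool per event type with a corePoolSize and a maxPoolSize. A plain-JDK sketch (not HBase's ExecutorService class) of how those two knobs interact; the bounded queue and the CallerRunsPolicy are assumptions made so the pool can actually grow to its maximum and the demo never rejects work:

```java
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class EventPoolSketch {
  public static void main(String[] args) throws Exception {
    // A small core, a hard maximum, and a queue in front: threads beyond the
    // core size are only created once the queue is full, and idle extras are
    // reclaimed after the keep-alive expires.
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        1,                        // corePoolSize
        3,                        // maxPoolSize
        60, TimeUnit.SECONDS,     // keep-alive for threads above the core size
        new ArrayBlockingQueue<>(10),
        new ThreadPoolExecutor.CallerRunsPolicy()); // run in caller if saturated

    for (int i = 0; i < 20; i++) {
      final int id = i;
      pool.execute(() -> System.out.println("handling event " + id
          + " on " + Thread.currentThread().getName()));
    }
    pool.shutdown();
    pool.awaitTermination(1, TimeUnit.MINUTES);
  }
}
```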
2024-11-16T19:27:52,669 INFO [RS:0;d11ab77873cb:36045 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:52,669 INFO [RS:0;d11ab77873cb:36045 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:52,669 INFO [RS:0;d11ab77873cb:36045 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:52,669 INFO [RS:0;d11ab77873cb:36045 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:52,669 INFO [RS:0;d11ab77873cb:36045 {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,36045,1731785272299-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T19:27:52,693 INFO [RS:0;d11ab77873cb:36045 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T19:27:52,693 INFO [RS:0;d11ab77873cb:36045 {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,36045,1731785272299-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:52,693 INFO [RS:0;d11ab77873cb:36045 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:52,693 INFO [RS:0;d11ab77873cb:36045 {}] regionserver.Replication(171): d11ab77873cb,36045,1731785272299 started 2024-11-16T19:27:52,716 INFO [RS:0;d11ab77873cb:36045 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:52,716 INFO [RS:0;d11ab77873cb:36045 {}] regionserver.HRegionServer(1482): Serving as d11ab77873cb,36045,1731785272299, RpcServer on d11ab77873cb/172.17.0.2:36045, sessionid=0x1004a0188070001 2024-11-16T19:27:52,717 DEBUG [RS:0;d11ab77873cb:36045 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T19:27:52,717 DEBUG [RS:0;d11ab77873cb:36045 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d11ab77873cb,36045,1731785272299 2024-11-16T19:27:52,717 DEBUG [RS:0;d11ab77873cb:36045 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd11ab77873cb,36045,1731785272299' 2024-11-16T19:27:52,717 DEBUG [RS:0;d11ab77873cb:36045 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T19:27:52,718 DEBUG [RS:0;d11ab77873cb:36045 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T19:27:52,718 DEBUG [RS:0;d11ab77873cb:36045 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T19:27:52,718 DEBUG [RS:0;d11ab77873cb:36045 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T19:27:52,718 DEBUG [RS:0;d11ab77873cb:36045 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d11ab77873cb,36045,1731785272299 2024-11-16T19:27:52,718 DEBUG [RS:0;d11ab77873cb:36045 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd11ab77873cb,36045,1731785272299' 2024-11-16T19:27:52,718 DEBUG [RS:0;d11ab77873cb:36045 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T19:27:52,719 DEBUG 
[RS:0;d11ab77873cb:36045 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T19:27:52,719 DEBUG [RS:0;d11ab77873cb:36045 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T19:27:52,719 INFO [RS:0;d11ab77873cb:36045 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T19:27:52,719 INFO [RS:0;d11ab77873cb:36045 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T19:27:52,822 INFO [RS:0;d11ab77873cb:36045 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d11ab77873cb%2C36045%2C1731785272299, suffix=, logDir=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299, archiveDir=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/oldWALs, maxLogs=32 2024-11-16T19:27:52,823 INFO [RS:0;d11ab77873cb:36045 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C36045%2C1731785272299.1731785272823 2024-11-16T19:27:52,836 INFO [RS:0;d11ab77873cb:36045 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785272823 2024-11-16T19:27:52,842 DEBUG [RS:0;d11ab77873cb:36045 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44625:44625),(127.0.0.1/127.0.0.1:33293:33293)] 2024-11-16T19:27:52,954 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T19:27:52,954 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, 
regionDir=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4 2024-11-16T19:27:52,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46515 is added to blk_1073741833_1009 (size=32) 2024-11-16T19:27:52,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44959 is added to blk_1073741833_1009 (size=32) 2024-11-16T19:27:52,973 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:27:52,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T19:27:52,977 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T19:27:52,977 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:52,981 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:27:52,982 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T19:27:52,984 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T19:27:52,984 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:52,984 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:27:52,985 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T19:27:52,990 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T19:27:52,990 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:52,991 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:27:52,991 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T19:27:52,993 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T19:27:52,993 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:52,994 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:27:52,994 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T19:27:52,995 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits 
file(s) under hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/hbase/meta/1588230740 2024-11-16T19:27:52,996 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/hbase/meta/1588230740 2024-11-16T19:27:52,997 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T19:27:52,998 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T19:27:52,998 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T19:27:53,000 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T19:27:53,003 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T19:27:53,004 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=843764, jitterRate=0.07290251553058624}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T19:27:53,005 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731785272973Initializing all the Stores at 1731785272974 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785272975 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785272975Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785272975Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785272975Cleaning up temporary data from old regions at 1731785272998 (+23 ms)Region opened successfully at 1731785273004 (+6 ms) 2024-11-16T19:27:53,005 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T19:27:53,005 INFO [PEWorker-1 {}] 
regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T19:27:53,005 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T19:27:53,005 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T19:27:53,005 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T19:27:53,005 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T19:27:53,005 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731785273005Disabling compacts and flushes for region at 1731785273005Disabling writes for close at 1731785273005Writing region close event to WAL at 1731785273005Closed at 1731785273005 2024-11-16T19:27:53,007 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T19:27:53,007 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T19:27:53,007 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T19:27:53,009 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T19:27:53,010 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T19:27:53,160 DEBUG [d11ab77873cb:42783 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T19:27:53,161 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d11ab77873cb,36045,1731785272299 2024-11-16T19:27:53,163 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d11ab77873cb,36045,1731785272299, state=OPENING 2024-11-16T19:27:53,164 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T19:27:53,166 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36045-0x1004a0188070001, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:53,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:27:53,167 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:27:53,167 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:27:53,168 
DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T19:27:53,168 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=d11ab77873cb,36045,1731785272299}] 2024-11-16T19:27:53,323 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T19:27:53,323 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:27:53,326 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50111, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T19:27:53,332 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T19:27:53,332 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T19:27:53,333 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:27:53,335 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d11ab77873cb%2C36045%2C1731785272299.meta, suffix=.meta, logDir=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299, archiveDir=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/oldWALs, maxLogs=32 2024-11-16T19:27:53,336 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta 2024-11-16T19:27:53,349 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta 2024-11-16T19:27:53,353 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33293:33293),(127.0.0.1/127.0.0.1:44625:44625)] 2024-11-16T19:27:53,355 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T19:27:53,355 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T19:27:53,355 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 
{event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T19:27:53,356 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-16T19:27:53,356 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T19:27:53,356 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:27:53,356 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T19:27:53,356 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T19:27:53,361 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T19:27:53,363 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T19:27:53,363 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:53,364 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:27:53,364 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T19:27:53,366 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered 
compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T19:27:53,366 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:53,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:27:53,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T19:27:53,368 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T19:27:53,368 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:53,371 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:27:53,371 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T19:27:53,374 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T19:27:53,374 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:53,375 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:27:53,375 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T19:27:53,378 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/hbase/meta/1588230740 2024-11-16T19:27:53,381 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/hbase/meta/1588230740 2024-11-16T19:27:53,383 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T19:27:53,383 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T19:27:53,385 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
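The FlushLargeStoresPolicy lines above fall back to region.getMemStoreFlushHeapSize divided by the number of column families when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset. With hbase:meta's four families, the 16.0 M in the log implies a 64 MB region flush size in this run; that is an inference from the numbers, not something the log states. The arithmetic as a tiny sketch:

```java
public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    // Fallback when hbase.hregion.percolumnfamilyflush.size.lower.bound is
    // not set: region memstore flush size / number of column families.
    long memStoreFlushSize = 64L * 1024 * 1024; // inferred from this run's log
    int families = 4;                           // info, ns, rep_barrier, table
    long perFamilyLowerBound = memStoreFlushSize / families;
    System.out.println(perFamilyLowerBound);    // 16777216 = 16 MB, matching
                                                // flushSizeLowerBound above
  }
}
```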
2024-11-16T19:27:53,387 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T19:27:53,389 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=866646, jitterRate=0.10199812054634094}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T19:27:53,389 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T19:27:53,390 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731785273356Writing region info on filesystem at 1731785273356Initializing all the Stores at 1731785273359 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785273359Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785273361 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785273361Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785273361Cleaning up temporary data from old regions at 1731785273383 (+22 ms)Running coprocessor post-open hooks at 1731785273389 (+6 ms)Region opened successfully at 1731785273390 (+1 ms) 2024-11-16T19:27:53,394 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731785273322 2024-11-16T19:27:53,398 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T19:27:53,399 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T19:27:53,400 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=d11ab77873cb,36045,1731785272299 2024-11-16T19:27:53,404 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d11ab77873cb,36045,1731785272299, state=OPEN 2024-11-16T19:27:53,407 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36045-0x1004a0188070001, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T19:27:53,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T19:27:53,407 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d11ab77873cb,36045,1731785272299 2024-11-16T19:27:53,407 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:27:53,408 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:27:53,413 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T19:27:53,413 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d11ab77873cb,36045,1731785272299 in 239 msec 2024-11-16T19:27:53,418 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T19:27:53,418 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 406 msec 2024-11-16T19:27:53,420 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T19:27:53,420 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T19:27:53,423 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T19:27:53,423 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d11ab77873cb,36045,1731785272299, seqNum=-1] 2024-11-16T19:27:53,423 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T19:27:53,425 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39321, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T19:27:53,439 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 907 msec 2024-11-16T19:27:53,439 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731785273439, completionTime=-1 2024-11-16T19:27:53,440 INFO 
[master/d11ab77873cb:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T19:27:53,440 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T19:27:53,443 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T19:27:53,443 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731785333443 2024-11-16T19:27:53,443 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731785393443 2024-11-16T19:27:53,443 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 3 msec 2024-11-16T19:27:53,444 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,42783,1731785272235-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:53,444 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,42783,1731785272235-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:53,444 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,42783,1731785272235-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:53,444 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d11ab77873cb:42783, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:53,444 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:53,445 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:53,448 DEBUG [master/d11ab77873cb:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T19:27:53,452 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.109sec 2024-11-16T19:27:53,452 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T19:27:53,453 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T19:27:53,453 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T19:27:53,453 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
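With the master reporting initialization complete, the following lines show the test opening a client connection to fetch the cluster id and the hbase:meta location. A sketch of what such a client typically does against this cluster: build a configuration, open a Connection, and read ClusterMetrics through Admin. The quorum address and client port are copied from this log; in practice they come from hbase-site.xml rather than being set in code:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterCheckSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 59815);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      System.out.println("active master: " + metrics.getMasterName());
      System.out.println("live region servers: "
          + metrics.getLiveServerMetrics().keySet());
    }
  }
}
```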
2024-11-16T19:27:53,453 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T19:27:53,453 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,42783,1731785272235-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T19:27:53,453 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,42783,1731785272235-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T19:27:53,462 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T19:27:53,462 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T19:27:53,463 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,42783,1731785272235-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:53,523 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53c8d161, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T19:27:53,524 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d11ab77873cb,42783,-1 for getting cluster id 2024-11-16T19:27:53,524 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T19:27:53,526 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd20320a4-660f-453c-b387-7fbffc8cf7f6' 2024-11-16T19:27:53,526 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T19:27:53,526 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d20320a4-660f-453c-b387-7fbffc8cf7f6" 2024-11-16T19:27:53,527 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@132de1c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T19:27:53,527 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d11ab77873cb,42783,-1] 2024-11-16T19:27:53,527 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T19:27:53,528 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:27:53,529 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38106, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T19:27:53,530 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cae84f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T19:27:53,531 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T19:27:53,532 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d11ab77873cb,36045,1731785272299, seqNum=-1] 2024-11-16T19:27:53,533 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T19:27:53,534 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53198, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T19:27:53,537 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=d11ab77873cb,42783,1731785272235 2024-11-16T19:27:53,537 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:27:53,541 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T19:27:53,563 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d11ab77873cb:0 server-side Connection retries=45 2024-11-16T19:27:53,563 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:27:53,563 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T19:27:53,563 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T19:27:53,563 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:27:53,563 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T19:27:53,563 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T19:27:53,564 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T19:27:53,564 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45785 2024-11-16T19:27:53,566 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45785 connecting to ZooKeeper ensemble=127.0.0.1:59815 2024-11-16T19:27:53,567 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:27:53,570 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:27:53,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:457850x0, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T19:27:53,575 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:457850x0, quorum=127.0.0.1:59815, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-16T19:27:53,575 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-16T19:27:53,575 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45785-0x1004a0188070002 connected 2024-11-16T19:27:53,576 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T19:27:53,581 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T19:27:53,581 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:45785-0x1004a0188070002, quorum=127.0.0.1:59815, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T19:27:53,583 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45785-0x1004a0188070002, quorum=127.0.0.1:59815, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T19:27:53,589 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45785 2024-11-16T19:27:53,589 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45785 2024-11-16T19:27:53,590 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45785 2024-11-16T19:27:53,590 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45785 2024-11-16T19:27:53,590 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45785 2024-11-16T19:27:53,592 INFO [RS:1;d11ab77873cb:45785 {}] regionserver.HRegionServer(746): ClusterId : d20320a4-660f-453c-b387-7fbffc8cf7f6 2024-11-16T19:27:53,592 DEBUG [RS:1;d11ab77873cb:45785 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T19:27:53,593 DEBUG [RS:1;d11ab77873cb:45785 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T19:27:53,593 DEBUG [RS:1;d11ab77873cb:45785 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T19:27:53,595 DEBUG [RS:1;d11ab77873cb:45785 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T19:27:53,595 DEBUG [RS:1;d11ab77873cb:45785 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4aedb955, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d11ab77873cb/172.17.0.2:0 2024-11-16T19:27:53,607 DEBUG [RS:1;d11ab77873cb:45785 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: 
Shutdownhook:RS:1;d11ab77873cb:45785 2024-11-16T19:27:53,607 INFO [RS:1;d11ab77873cb:45785 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T19:27:53,607 INFO [RS:1;d11ab77873cb:45785 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T19:27:53,607 DEBUG [RS:1;d11ab77873cb:45785 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-16T19:27:53,608 INFO [RS:1;d11ab77873cb:45785 {}] regionserver.HRegionServer(2659): reportForDuty to master=d11ab77873cb,42783,1731785272235 with port=45785, startcode=1731785273562 2024-11-16T19:27:53,608 DEBUG [RS:1;d11ab77873cb:45785 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T19:27:53,610 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38971, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T19:27:53,610 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42783 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d11ab77873cb,45785,1731785273562 2024-11-16T19:27:53,610 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42783 {}] master.ServerManager(517): Registering regionserver=d11ab77873cb,45785,1731785273562 2024-11-16T19:27:53,612 DEBUG [RS:1;d11ab77873cb:45785 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4 2024-11-16T19:27:53,612 DEBUG [RS:1;d11ab77873cb:45785 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41599 2024-11-16T19:27:53,612 DEBUG [RS:1;d11ab77873cb:45785 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T19:27:53,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T19:27:53,614 DEBUG [RS:1;d11ab77873cb:45785 {}] zookeeper.ZKUtil(111): regionserver:45785-0x1004a0188070002, quorum=127.0.0.1:59815, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d11ab77873cb,45785,1731785273562 2024-11-16T19:27:53,614 WARN [RS:1;d11ab77873cb:45785 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-16T19:27:53,614 INFO [RS:1;d11ab77873cb:45785 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T19:27:53,614 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d11ab77873cb,45785,1731785273562] 2024-11-16T19:27:53,614 DEBUG [RS:1;d11ab77873cb:45785 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562 2024-11-16T19:27:53,620 INFO [RS:1;d11ab77873cb:45785 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T19:27:53,625 INFO [RS:1;d11ab77873cb:45785 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T19:27:53,625 INFO [RS:1;d11ab77873cb:45785 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T19:27:53,625 INFO [RS:1;d11ab77873cb:45785 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:53,626 INFO [RS:1;d11ab77873cb:45785 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T19:27:53,627 INFO [RS:1;d11ab77873cb:45785 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T19:27:53,627 INFO [RS:1;d11ab77873cb:45785 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:53,627 DEBUG [RS:1;d11ab77873cb:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:53,627 DEBUG [RS:1;d11ab77873cb:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:53,627 DEBUG [RS:1;d11ab77873cb:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:53,627 DEBUG [RS:1;d11ab77873cb:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:53,627 DEBUG [RS:1;d11ab77873cb:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:53,627 DEBUG [RS:1;d11ab77873cb:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d11ab77873cb:0, corePoolSize=2, maxPoolSize=2 2024-11-16T19:27:53,627 DEBUG [RS:1;d11ab77873cb:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:53,627 DEBUG [RS:1;d11ab77873cb:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:53,628 DEBUG [RS:1;d11ab77873cb:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d11ab77873cb:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T19:27:53,628 DEBUG [RS:1;d11ab77873cb:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:53,628 DEBUG [RS:1;d11ab77873cb:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:53,628 DEBUG [RS:1;d11ab77873cb:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:27:53,628 DEBUG [RS:1;d11ab77873cb:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d11ab77873cb:0, corePoolSize=3, maxPoolSize=3 2024-11-16T19:27:53,628 DEBUG [RS:1;d11ab77873cb:45785 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0, corePoolSize=3, maxPoolSize=3 2024-11-16T19:27:53,630 INFO [RS:1;d11ab77873cb:45785 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:53,631 INFO [RS:1;d11ab77873cb:45785 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:53,631 INFO [RS:1;d11ab77873cb:45785 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:53,631 INFO [RS:1;d11ab77873cb:45785 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:53,631 INFO [RS:1;d11ab77873cb:45785 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:53,631 INFO [RS:1;d11ab77873cb:45785 {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,45785,1731785273562-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T19:27:53,676 INFO [RS:1;d11ab77873cb:45785 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T19:27:53,677 INFO [RS:1;d11ab77873cb:45785 {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,45785,1731785273562-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:53,677 INFO [RS:1;d11ab77873cb:45785 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:27:53,677 INFO [RS:1;d11ab77873cb:45785 {}] regionserver.Replication(171): d11ab77873cb,45785,1731785273562 started 2024-11-16T19:27:53,694 INFO [RS:1;d11ab77873cb:45785 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T19:27:53,694 INFO [RS:1;d11ab77873cb:45785 {}] regionserver.HRegionServer(1482): Serving as d11ab77873cb,45785,1731785273562, RpcServer on d11ab77873cb/172.17.0.2:45785, sessionid=0x1004a0188070002 2024-11-16T19:27:53,694 DEBUG [RS:1;d11ab77873cb:45785 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T19:27:53,694 DEBUG [RS:1;d11ab77873cb:45785 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d11ab77873cb,45785,1731785273562 2024-11-16T19:27:53,694 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;d11ab77873cb:45785,5,FailOnTimeoutGroup] 2024-11-16T19:27:53,694 DEBUG [RS:1;d11ab77873cb:45785 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd11ab77873cb,45785,1731785273562' 2024-11-16T19:27:53,694 DEBUG [RS:1;d11ab77873cb:45785 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T19:27:53,694 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-16T19:27:53,695 DEBUG [RS:1;d11ab77873cb:45785 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T19:27:53,695 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T19:27:53,696 DEBUG [RS:1;d11ab77873cb:45785 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T19:27:53,696 DEBUG [RS:1;d11ab77873cb:45785 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T19:27:53,696 DEBUG [RS:1;d11ab77873cb:45785 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d11ab77873cb,45785,1731785273562 2024-11-16T19:27:53,696 DEBUG [RS:1;d11ab77873cb:45785 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd11ab77873cb,45785,1731785273562' 2024-11-16T19:27:53,696 DEBUG [RS:1;d11ab77873cb:45785 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T19:27:53,696 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is d11ab77873cb,42783,1731785272235 2024-11-16T19:27:53,696 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@63a82de5 2024-11-16T19:27:53,697 DEBUG [RS:1;d11ab77873cb:45785 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T19:27:53,697 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T19:27:53,697 DEBUG [RS:1;d11ab77873cb:45785 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T19:27:53,697 INFO [RS:1;d11ab77873cb:45785 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T19:27:53,697 INFO [RS:1;d11ab77873cb:45785 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-16T19:27:53,704 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38116, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T19:27:53,705 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42783 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T19:27:53,705 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42783 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-16T19:27:53,705 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42783 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T19:27:53,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42783 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T19:27:53,709 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T19:27:53,709 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:53,709 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42783 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-16T19:27:53,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42783 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T19:27:53,711 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T19:27:53,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44959 is added to blk_1073741835_1011 (size=393) 2024-11-16T19:27:53,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46515 is added to blk_1073741835_1011 (size=393) 2024-11-16T19:27:53,726 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c8746695b3d38667decb7d27505c93be, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4 2024-11-16T19:27:53,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46515 is added to blk_1073741836_1012 (size=76) 2024-11-16T19:27:53,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44959 is added to blk_1073741836_1012 (size=76) 2024-11-16T19:27:53,736 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:27:53,736 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing c8746695b3d38667decb7d27505c93be, disabling compactions & flushes 2024-11-16T19:27:53,736 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be. 2024-11-16T19:27:53,736 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be. 2024-11-16T19:27:53,736 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be. after waiting 0 ms 2024-11-16T19:27:53,736 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be. 2024-11-16T19:27:53,736 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be. 
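The preceding entries record a client request (Client=jenkins//172.17.0.2) to create 'TestLogRolling-testLogRollOnDatanodeDeath' with a single 'info' family, followed by the CreateTableProcedure writing the FS layout and initializing the region; the TableDescriptorChecker warnings reflect the deliberately tiny max-filesize (786432) and memstore flush size (8192) in effect for the test. As a hedged illustration of the client side of such a request, not the test's own code, an equivalent table could be created through the standard Admin API roughly as follows (connection configuration is assumed):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)        // VERSIONS => '1' in the logged descriptor
                  .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536 B (64KB)'
                  .build())
              .build();
          // Shows up on the master as "Client=... create '...'" and a CreateTableProcedure.
          admin.createTable(desc);
        }
      }
    }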
2024-11-16T19:27:53,736 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for c8746695b3d38667decb7d27505c93be: Waiting for close lock at 1731785273736Disabling compacts and flushes for region at 1731785273736Disabling writes for close at 1731785273736Writing region close event to WAL at 1731785273736Closed at 1731785273736 2024-11-16T19:27:53,740 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T19:27:53,741 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731785273741"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731785273741"}]},"ts":"1731785273741"} 2024-11-16T19:27:53,744 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-16T19:27:53,745 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T19:27:53,746 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731785273745"}]},"ts":"1731785273745"} 2024-11-16T19:27:53,748 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-16T19:27:53,749 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=c8746695b3d38667decb7d27505c93be, ASSIGN}] 2024-11-16T19:27:53,751 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=c8746695b3d38667decb7d27505c93be, ASSIGN 2024-11-16T19:27:53,752 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=c8746695b3d38667decb7d27505c93be, ASSIGN; state=OFFLINE, location=d11ab77873cb,36045,1731785272299; forceNewPlan=false, retain=false 2024-11-16T19:27:53,800 INFO [RS:1;d11ab77873cb:45785 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d11ab77873cb%2C45785%2C1731785273562, suffix=, logDir=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562, archiveDir=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/oldWALs, maxLogs=32 2024-11-16T19:27:53,801 INFO [RS:1;d11ab77873cb:45785 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C45785%2C1731785273562.1731785273801 2024-11-16T19:27:53,808 INFO [RS:1;d11ab77873cb:45785 {}] wal.AbstractFSWAL(991): New 
WAL /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 2024-11-16T19:27:53,810 DEBUG [RS:1;d11ab77873cb:45785 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44625:44625),(127.0.0.1/127.0.0.1:33293:33293)] 2024-11-16T19:27:53,859 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T19:27:53,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:27:53,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:27:53,895 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:27:53,896 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:27:53,903 INFO [d11ab77873cb:42783 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-16T19:27:53,904 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c8746695b3d38667decb7d27505c93be, regionState=OPENING, regionLocation=d11ab77873cb,36045,1731785272299 2024-11-16T19:27:53,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=c8746695b3d38667decb7d27505c93be, ASSIGN because future has completed 2024-11-16T19:27:53,909 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c8746695b3d38667decb7d27505c93be, server=d11ab77873cb,36045,1731785272299}] 2024-11-16T19:27:54,068 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be. 
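Up to this point the second region server has created its own write-ahead log (an FSHLog with blocksize=256 MB and rollsize=128 MB, written through the two-datanode pipeline shown above), and the new table's region has been marked OPENING and queued for assignment. Since log rolling is what this test exercises, it may help to note, purely as a hedged aside and not as an action taken at this point in the log, that a WAL roll can also be requested explicitly through the Admin API:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Ask each live region server to close its current WAL file and start a new one.
          for (ServerName rs : admin.getRegionServers()) {
            admin.rollWALWriter(rs);
          }
        }
      }
    }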
2024-11-16T19:27:54,068 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c8746695b3d38667decb7d27505c93be, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be.', STARTKEY => '', ENDKEY => ''} 2024-11-16T19:27:54,069 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath c8746695b3d38667decb7d27505c93be 2024-11-16T19:27:54,069 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:27:54,069 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c8746695b3d38667decb7d27505c93be 2024-11-16T19:27:54,069 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c8746695b3d38667decb7d27505c93be 2024-11-16T19:27:54,071 INFO [StoreOpener-c8746695b3d38667decb7d27505c93be-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c8746695b3d38667decb7d27505c93be 2024-11-16T19:27:54,073 INFO [StoreOpener-c8746695b3d38667decb7d27505c93be-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c8746695b3d38667decb7d27505c93be columnFamilyName info 2024-11-16T19:27:54,073 DEBUG [StoreOpener-c8746695b3d38667decb7d27505c93be-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:27:54,074 INFO [StoreOpener-c8746695b3d38667decb7d27505c93be-1 {}] regionserver.HStore(327): Store=c8746695b3d38667decb7d27505c93be/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:27:54,074 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c8746695b3d38667decb7d27505c93be 2024-11-16T19:27:54,075 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be 2024-11-16T19:27:54,075 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be 2024-11-16T19:27:54,076 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c8746695b3d38667decb7d27505c93be 2024-11-16T19:27:54,076 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c8746695b3d38667decb7d27505c93be 2024-11-16T19:27:54,078 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c8746695b3d38667decb7d27505c93be 2024-11-16T19:27:54,080 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T19:27:54,081 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c8746695b3d38667decb7d27505c93be; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=744370, jitterRate=-0.05348525941371918}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T19:27:54,081 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c8746695b3d38667decb7d27505c93be 2024-11-16T19:27:54,081 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c8746695b3d38667decb7d27505c93be: Running coprocessor pre-open hook at 1731785274069Writing region info on filesystem at 1731785274069Initializing all the Stores at 1731785274070 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785274070Cleaning up temporary data from old regions at 1731785274076 (+6 ms)Running coprocessor post-open hooks at 1731785274081 (+5 ms)Region opened successfully at 1731785274081 2024-11-16T19:27:54,083 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be., pid=6, masterSystemTime=1731785274064 2024-11-16T19:27:54,085 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be. 2024-11-16T19:27:54,085 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be. 2024-11-16T19:27:54,086 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c8746695b3d38667decb7d27505c93be, regionState=OPEN, openSeqNum=2, regionLocation=d11ab77873cb,36045,1731785272299 2024-11-16T19:27:54,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c8746695b3d38667decb7d27505c93be, server=d11ab77873cb,36045,1731785272299 because future has completed 2024-11-16T19:27:54,092 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T19:27:54,093 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c8746695b3d38667decb7d27505c93be, server=d11ab77873cb,36045,1731785272299 in 181 msec 2024-11-16T19:27:54,095 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T19:27:54,095 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=c8746695b3d38667decb7d27505c93be, ASSIGN in 343 msec 2024-11-16T19:27:54,096 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T19:27:54,097 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731785274096"}]},"ts":"1731785274096"} 2024-11-16T19:27:54,099 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-16T19:27:54,100 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T19:27:54,103 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 394 msec 2024-11-16T19:27:58,859 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T19:27:58,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:27:58,886 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:27:58,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:27:58,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:27:58,907 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T19:27:58,907 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-16T19:27:58,907 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T19:27:58,908 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-16T19:27:58,908 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T19:27:58,908 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-16T19:27:58,909 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-16T19:28:03,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42783 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T19:28:03,750 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-16T19:28:03,750 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-16T19:28:03,762 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T19:28:03,762 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be. 2024-11-16T19:28:03,777 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:28:03,781 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:28:03,782 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:28:03,782 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:28:03,782 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T19:28:03,782 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21ddd8f9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:28:03,783 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d0f4a9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:28:03,878 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@730725ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/java.io.tmpdir/jetty-localhost-41515-hadoop-hdfs-3_4_1-tests_jar-_-any-13304991619898645257/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:28:03,878 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20e1b523{HTTP/1.1, (http/1.1)}{localhost:41515} 2024-11-16T19:28:03,878 INFO [Time-limited test {}] server.Server(415): Started @110532ms 2024-11-16T19:28:03,880 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T19:28:03,908 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:28:03,912 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:28:03,913 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:28:03,913 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:28:03,913 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T19:28:03,913 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@83c4e47{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:28:03,914 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@519de6b2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:28:03,939 WARN [Thread-822 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data5/current/BP-1424824963-172.17.0.2-1731785271598/current, will proceed with Du for space computation calculation, 2024-11-16T19:28:03,939 WARN [Thread-823 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data6/current/BP-1424824963-172.17.0.2-1731785271598/current, will proceed with Du for space computation calculation, 2024-11-16T19:28:03,953 WARN [Thread-802 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T19:28:03,955 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x23dba588c344d4b9 with lease ID 0xf34050bba09898e9: Processing first storage report for DS-aa473454-493d-4d15-b8df-a374168d11df from datanode DatanodeRegistration(127.0.0.1:32871, datanodeUuid=82cc5cca-2971-40d1-8476-8f370f58b096, infoPort=37021, infoSecurePort=0, ipcPort=43569, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598) 2024-11-16T19:28:03,955 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x23dba588c344d4b9 with lease ID 0xf34050bba09898e9: from storage DS-aa473454-493d-4d15-b8df-a374168d11df node DatanodeRegistration(127.0.0.1:32871, datanodeUuid=82cc5cca-2971-40d1-8476-8f370f58b096, infoPort=37021, infoSecurePort=0, ipcPort=43569, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:28:03,956 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x23dba588c344d4b9 with lease ID 0xf34050bba09898e9: Processing first storage report for DS-185eef72-6b91-41b8-9310-033375521719 from datanode DatanodeRegistration(127.0.0.1:32871, datanodeUuid=82cc5cca-2971-40d1-8476-8f370f58b096, infoPort=37021, infoSecurePort=0, ipcPort=43569, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598) 2024-11-16T19:28:03,956 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x23dba588c344d4b9 with lease ID 0xf34050bba09898e9: from storage DS-185eef72-6b91-41b8-9310-033375521719 node DatanodeRegistration(127.0.0.1:32871, datanodeUuid=82cc5cca-2971-40d1-8476-8f370f58b096, infoPort=37021, infoSecurePort=0, ipcPort=43569, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:28:04,011 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4438143d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/java.io.tmpdir/jetty-localhost-35891-hadoop-hdfs-3_4_1-tests_jar-_-any-12435572766244673212/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:28:04,011 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@32dac098{HTTP/1.1, (http/1.1)}{localhost:35891} 2024-11-16T19:28:04,011 INFO [Time-limited test {}] server.Server(415): Started @110665ms 2024-11-16T19:28:04,013 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T19:28:04,045 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:28:04,051 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:28:04,052 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:28:04,052 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:28:04,052 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T19:28:04,053 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3dc0bdb3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:28:04,054 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50ff2063{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:28:04,078 WARN [Thread-857 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data7/current/BP-1424824963-172.17.0.2-1731785271598/current, will proceed with Du for space computation calculation, 2024-11-16T19:28:04,078 WARN [Thread-858 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data8/current/BP-1424824963-172.17.0.2-1731785271598/current, will proceed with Du for space computation calculation, 2024-11-16T19:28:04,101 WARN [Thread-837 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T19:28:04,103 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6af35affe8bc815c with lease ID 0xf34050bba09898ea: Processing first storage report for DS-61da5b1f-894e-4c9a-915d-cea845e6a001 from datanode DatanodeRegistration(127.0.0.1:39343, datanodeUuid=22102ef6-fd58-489e-a129-078243a3c26e, infoPort=44683, infoSecurePort=0, ipcPort=44113, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598) 2024-11-16T19:28:04,103 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6af35affe8bc815c with lease ID 0xf34050bba09898ea: from storage DS-61da5b1f-894e-4c9a-915d-cea845e6a001 node DatanodeRegistration(127.0.0.1:39343, datanodeUuid=22102ef6-fd58-489e-a129-078243a3c26e, infoPort=44683, infoSecurePort=0, ipcPort=44113, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:28:04,103 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6af35affe8bc815c with lease ID 0xf34050bba09898ea: Processing first storage report for DS-95e19578-fd84-445b-a714-4197e32e10e7 from datanode DatanodeRegistration(127.0.0.1:39343, datanodeUuid=22102ef6-fd58-489e-a129-078243a3c26e, infoPort=44683, infoSecurePort=0, ipcPort=44113, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598) 2024-11-16T19:28:04,103 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6af35affe8bc815c with lease ID 0xf34050bba09898ea: from storage DS-95e19578-fd84-445b-a714-4197e32e10e7 node DatanodeRegistration(127.0.0.1:39343, datanodeUuid=22102ef6-fd58-489e-a129-078243a3c26e, infoPort=44683, infoSecurePort=0, ipcPort=44113, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:28:04,157 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c81b75d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/java.io.tmpdir/jetty-localhost-35877-hadoop-hdfs-3_4_1-tests_jar-_-any-2773658351633546439/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:28:04,157 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7f55aa3b{HTTP/1.1, (http/1.1)}{localhost:35877} 2024-11-16T19:28:04,158 INFO [Time-limited test {}] server.Server(415): Started @110812ms 2024-11-16T19:28:04,159 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
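The storage reports, Jetty datanode web contexts, and the two recurring WARNs above (the directory-scanner throttle and the unreadable HTTP-auth signature secret file) come from an in-process HDFS cluster started by the test harness. The following is a hedged, illustrative sketch only, not code from this run: it assumes the MiniDFSCluster test API from the hadoop-hdfs tests artifact and the property names quoted in (or implied by) the warnings; the secret-file path is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Sketch: start an in-JVM HDFS like the one logging above and pre-empt the two WARNs.
    public class MiniHdfsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Keep the scanner throttle at or below the 1000 ms/sec ceiling named in the WARN,
        // so DirectoryScanner does not fall back to its default.
        conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 500);
        // Give AuthenticationFilter a readable secret file instead of letting it fall back
        // to random secrets (hypothetical path).
        conf.set("hadoop.http.authentication.signature.secret.file",
            "/tmp/hadoop-http-auth-signature-secret");
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(5)   // several datanodes, matching the registrations above
            .build();
        cluster.waitActive();  // block until block reports like the ones above arrive
        try {
          // ... run test logic against cluster.getFileSystem() ...
        } finally {
          cluster.shutdown();
        }
      }
    }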
2024-11-16T19:28:04,215 WARN [Thread-883 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data9/current/BP-1424824963-172.17.0.2-1731785271598/current, will proceed with Du for space computation calculation, 2024-11-16T19:28:04,216 WARN [Thread-884 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data10/current/BP-1424824963-172.17.0.2-1731785271598/current, will proceed with Du for space computation calculation, 2024-11-16T19:28:04,237 WARN [Thread-872 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T19:28:04,239 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x32b3b40418c9ebb9 with lease ID 0xf34050bba09898eb: Processing first storage report for DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d from datanode DatanodeRegistration(127.0.0.1:33499, datanodeUuid=610fb1e1-0ee3-41c4-8b6b-5fcbf217455a, infoPort=44743, infoSecurePort=0, ipcPort=44543, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598) 2024-11-16T19:28:04,240 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x32b3b40418c9ebb9 with lease ID 0xf34050bba09898eb: from storage DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d node DatanodeRegistration(127.0.0.1:33499, datanodeUuid=610fb1e1-0ee3-41c4-8b6b-5fcbf217455a, infoPort=44743, infoSecurePort=0, ipcPort=44543, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:28:04,240 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x32b3b40418c9ebb9 with lease ID 0xf34050bba09898eb: Processing first storage report for DS-430b3324-dee9-4d57-a6bb-66a061426d21 from datanode DatanodeRegistration(127.0.0.1:33499, datanodeUuid=610fb1e1-0ee3-41c4-8b6b-5fcbf217455a, infoPort=44743, infoSecurePort=0, ipcPort=44543, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598) 2024-11-16T19:28:04,240 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x32b3b40418c9ebb9 with lease ID 0xf34050bba09898eb: from storage DS-430b3324-dee9-4d57-a6bb-66a061426d21 node DatanodeRegistration(127.0.0.1:33499, datanodeUuid=610fb1e1-0ee3-41c4-8b6b-5fcbf217455a, infoPort=44743, infoSecurePort=0, ipcPort=44543, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:28:04,278 WARN [ResponseProcessor for block BP-1424824963-172.17.0.2-1731785271598:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1424824963-172.17.0.2-1731785271598:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:04,278 WARN [ResponseProcessor for block BP-1424824963-172.17.0.2-1731785271598:blk_1073741832_1008 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1424824963-172.17.0.2-1731785271598:blk_1073741832_1008 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:04,278 WARN [ResponseProcessor for block BP-1424824963-172.17.0.2-1731785271598:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1424824963-172.17.0.2-1731785271598:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:04,279 WARN [ResponseProcessor for block BP-1424824963-172.17.0.2-1731785271598:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1424824963-172.17.0.2-1731785271598:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1424824963-172.17.0.2-1731785271598:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:04,279 WARN [DataStreamer for file /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/WALs/d11ab77873cb,42783,1731785272235/d11ab77873cb%2C42783%2C1731785272235.1731785272443 block BP-1424824963-172.17.0.2-1731785271598:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK], DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK]) is bad. 2024-11-16T19:28:04,279 WARN [DataStreamer for file /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 block BP-1424824963-172.17.0.2-1731785271598:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK], DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK]) is bad. 
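The EOFExceptions and the "Error Recovery ... datanode 0(...) is bad" messages above are the HDFS client's DataStreamer reacting to a pipeline member disappearing while the WAL files are still open for write. How aggressively the client tries to replace a failed datanode is controlled by the replace-datanode-on-failure client settings; the snippet below is only a sketch of how those knobs are commonly tuned for very small clusters, using standard hadoop-hdfs-client property names, with values that are illustrative rather than read from this run.

    import org.apache.hadoop.conf.Configuration;

    // Sketch of the client-side pipeline-recovery knobs relevant to the
    // "datanode ... is bad" recovery messages above.
    public class PipelineRecoveryConf {
      public static Configuration forSmallCluster(Configuration base) {
        Configuration conf = new Configuration(base);
        // Keep automatic datanode replacement enabled (the default behaviour).
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // best-effort lets the write continue on the surviving datanodes instead of
        // failing outright when no replacement datanode can be found.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
      }
    }

On a cluster this small the setting matters: once both pipeline members are gone, the client can only report "All datanodes ... are bad. Aborting...", which is exactly what appears later in this log.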
2024-11-16T19:28:04,280 WARN [DataStreamer for file /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta block BP-1424824963-172.17.0.2-1731785271598:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK], DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK]) is bad. 2024-11-16T19:28:04,280 WARN [DataStreamer for file /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785272823 block BP-1424824963-172.17.0.2-1731785271598:blk_1073741832_1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741832_1008 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK], DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK]) is bad. 2024-11-16T19:28:04,280 WARN [PacketResponder: BP-1424824963-172.17.0.2-1731785271598:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44959] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T19:28:04,281 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:50780 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50780 dst: /127.0.0.1:46515 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:04,281 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:44648 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:44959:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44648 dst: /127.0.0.1:44959 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:04,281 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2028622859_22 at /127.0.0.1:44620 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44959:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44620 dst: /127.0.0.1:44959 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T19:28:04,282 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-525120902_22 at /127.0.0.1:50808 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:46515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50808 dst: /127.0.0.1:46515 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:04,282 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2028622859_22 at /127.0.0.1:50740 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50740 dst: /127.0.0.1:46515 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T19:28:04,282 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:50764 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:46515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50764 dst: /127.0.0.1:46515 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:04,283 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-525120902_22 at /127.0.0.1:44708 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:44959:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44708 dst: /127.0.0.1:44959 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:04,283 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:44674 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44959:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44674 dst: /127.0.0.1:44959 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T19:28:04,286 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@52be898{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:28:04,287 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@520d17ab{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:28:04,287 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:28:04,287 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39a69c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:28:04,288 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d3d4ef0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/hadoop.log.dir/,STOPPED} 2024-11-16T19:28:04,289 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T19:28:04,289 WARN [BP-1424824963-172.17.0.2-1731785271598 heartbeating to localhost/127.0.0.1:41599 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T19:28:04,289 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T19:28:04,289 WARN [BP-1424824963-172.17.0.2-1731785271598 heartbeating to localhost/127.0.0.1:41599 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1424824963-172.17.0.2-1731785271598 (Datanode Uuid b65ae952-8681-4d98-9cea-83182561fe12) service to localhost/127.0.0.1:41599 2024-11-16T19:28:04,290 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data3/current/BP-1424824963-172.17.0.2-1731785271598 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:28:04,290 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data4/current/BP-1424824963-172.17.0.2-1731785271598 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:28:04,290 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T19:28:04,294 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@6fdb820b {}] datanode.DataXceiver(331): 127.0.0.1:46515:DataXceiver error processing unknown operation src: /127.0.0.1:60096 dst: /127.0.0.1:46515 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
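The "Stopped o.e.j.w.WebAppContext" and "Ending block pool service" lines above show a datanode being shut down while writes are in flight, which is the fault this testLogRollOnDatanodeDeath scenario injects. A hedged sketch of how such a test typically kills and later revives a datanode through the MiniDFSCluster test API (not the literal code of this test) follows.

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Illustrative sketch: inject and undo a "datanode death" like the one recorded above.
    public class DataNodeDeathSketch {
      public static void killAndRevive(MiniDFSCluster cluster, int dnIndex) throws Exception {
        // Stopping the datanode ends its block pool service and Jetty contexts,
        // which is what the "Ending block pool service" lines above show.
        var stopped = cluster.stopDataNode(dnIndex);   // returns the stopped datanode's properties
        // ... exercise WAL writes here while the pipeline is degraded ...
        // Bring the same datanode back on its old ports and wait for re-registration.
        cluster.restartDataNode(stopped, true);
        cluster.waitActive();
      }
    }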
2024-11-16T19:28:04,294 WARN [DataStreamer for file /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 block BP-1424824963-172.17.0.2-1731785271598:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:04,295 WARN [DataStreamer for file /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/WALs/d11ab77873cb,42783,1731785272235/d11ab77873cb%2C42783%2C1731785272235.1731785272443 block BP-1424824963-172.17.0.2-1731785271598:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T19:28:04,298 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f1f9cf1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:28:04,298 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2df55a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:28:04,298 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:28:04,298 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bb4f47b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:28:04,299 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35a03ba9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/hadoop.log.dir/,STOPPED} 2024-11-16T19:28:04,299 WARN [DataStreamer for file /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785272823 block BP-1424824963-172.17.0.2-1731785271598:blk_1073741832_1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741832_1008 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:04,299 WARN [DataStreamer for file /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta block BP-1424824963-172.17.0.2-1731785271598:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:04,300 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T19:28:04,300 WARN [BP-1424824963-172.17.0.2-1731785271598 heartbeating to localhost/127.0.0.1:41599 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T19:28:04,300 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T19:28:04,300 WARN [BP-1424824963-172.17.0.2-1731785271598 heartbeating to localhost/127.0.0.1:41599 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1424824963-172.17.0.2-1731785271598 (Datanode Uuid c845b32f-7331-4469-af57-91259711f9b4) service to localhost/127.0.0.1:41599 2024-11-16T19:28:04,300 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data1/current/BP-1424824963-172.17.0.2-1731785271598 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:28:04,301 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data2/current/BP-1424824963-172.17.0.2-1731785271598 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:28:04,301 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T19:28:04,304 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be., hostname=d11ab77873cb,36045,1731785272299, seqNum=2] 2024-11-16T19:28:04,306 ERROR [FSHLog-0-hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4-prefix:d11ab77873cb,36045,1731785272299 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:04,306 WARN [FSHLog-0-hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4-prefix:d11ab77873cb,36045,1731785272299 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:04,306 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:04,306 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d11ab77873cb%2C36045%2C1731785272299:(num 1731785272823) roll requested 2024-11-16T19:28:04,306 INFO [regionserver/d11ab77873cb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C36045%2C1731785272299.1731785284306 2024-11-16T19:28:04,311 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:04,311 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:04,312 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:04,312 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:04,312 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:04,312 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785272823 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785284306 2024-11-16T19:28:04,312 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:04,312 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:04,313 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44683:44683),(127.0.0.1/127.0.0.1:37021:37021)] 2024-11-16T19:28:04,313 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785272823 is not closed yet, will try archiving it next time 2024-11-16T19:28:04,313 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-16T19:28:04,314 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-16T19:28:04,314 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785272823 2024-11-16T19:28:04,316 WARN [IPC Server handler 0 on default port 41599 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785272823 has not been closed. Lease recovery is in progress. 
RecoveryId = 1019 for block blk_1073741832_1008 2024-11-16T19:28:04,319 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785272823 after 4ms 2024-11-16T19:28:04,404 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:05,629 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:06,313 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:06,314 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785284306 2024-11-16T19:28:06,315 WARN [ResponseProcessor for block BP-1424824963-172.17.0.2-1731785271598:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1424824963-172.17.0.2-1731785271598:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:06,315 WARN [DataStreamer for file /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785284306 block BP-1424824963-172.17.0.2-1731785271598:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK], DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK]) is bad. 2024-11-16T19:28:06,315 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:35010 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:39343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35010 dst: /127.0.0.1:39343 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
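A few records earlier, RecoverLeaseFSUtils begins recovering the lease on the previous WAL file: attempt=0 fails while the NameNode reports "Lease recovery is in progress". The general shape of that retry loop against the public HDFS client API, assuming a DistributedFileSystem and a hypothetical WAL path, looks roughly like the sketch below; it is not HBase's RecoverLeaseFSUtils itself.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Sketch of lease recovery on an abandoned WAL, mirroring the
    // "Recover lease on dfs file ..." / "Failed to recover lease, attempt=0" lines above.
    public class LeaseRecoverySketch {
      public static void recover(DistributedFileSystem dfs, Path oldWal) throws Exception {
        // recoverLease returns true once the file is closed; until then the NameNode
        // keeps reporting that lease recovery is in progress, as in the WARN above.
        boolean closed = dfs.recoverLease(oldWal);
        while (!closed) {
          Thread.sleep(1000L);  // back off between attempts
          closed = dfs.isFileClosed(oldWal)   // either the recovery completed...
              || dfs.recoverLease(oldWal);    // ...or we trigger it again
        }
      }
    }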
2024-11-16T19:28:06,316 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:35610 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:32871:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35610 dst: /127.0.0.1:32871 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:06,318 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4438143d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:28:06,318 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@32dac098{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:28:06,318 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:28:06,319 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@519de6b2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:28:06,319 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@83c4e47{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/hadoop.log.dir/,STOPPED} 2024-11-16T19:28:06,321 WARN [BP-1424824963-172.17.0.2-1731785271598 heartbeating to localhost/127.0.0.1:41599 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T19:28:06,321 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T19:28:06,321 WARN [BP-1424824963-172.17.0.2-1731785271598 heartbeating to localhost/127.0.0.1:41599 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1424824963-172.17.0.2-1731785271598 (Datanode Uuid 22102ef6-fd58-489e-a129-078243a3c26e) service to localhost/127.0.0.1:41599 2024-11-16T19:28:06,321 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T19:28:06,321 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data7/current/BP-1424824963-172.17.0.2-1731785271598 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:28:06,322 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data8/current/BP-1424824963-172.17.0.2-1731785271598 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:28:06,322 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T19:28:06,404 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:07,630 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:08,313 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:08,314 WARN [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK]] 2024-11-16T19:28:08,315 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d11ab77873cb%2C36045%2C1731785272299:(num 1731785284306) roll requested 2024-11-16T19:28:08,315 INFO [regionserver/d11ab77873cb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C36045%2C1731785272299.1731785288315 2024-11-16T19:28:08,321 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785272823 after 4006ms 2024-11-16T19:28:08,322 WARN [Thread-904 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39343 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:08,322 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:35632 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741839_1021] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data6]'}, localName='127.0.0.1:32871', datanodeUuid='82cc5cca-2971-40d1-8476-8f370f58b096', xmitsInProgress=0}:Exception transferring block BP-1424824963-172.17.0.2-1731785271598:blk_1073741839_1021 to mirror 127.0.0.1:39343 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:08,323 WARN [Thread-904 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK], DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK]) is bad. 2024-11-16T19:28:08,323 WARN [Thread-904 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741839_1021 2024-11-16T19:28:08,323 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:35632 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741839_1021] {}] datanode.BlockReceiver(316): Block 1073741839 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-16T19:28:08,323 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:35632 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:32871:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35632 dst: /127.0.0.1:32871 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:08,326 WARN [Thread-904 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK] 2024-11-16T19:28:08,327 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T19:28:08,331 WARN [Thread-904 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46515 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:08,331 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:35644 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741840_1022] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data6]'}, localName='127.0.0.1:32871', datanodeUuid='82cc5cca-2971-40d1-8476-8f370f58b096', xmitsInProgress=0}:Exception transferring block BP-1424824963-172.17.0.2-1731785271598:blk_1073741840_1022 to mirror 127.0.0.1:46515 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:08,332 WARN [Thread-904 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK], DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]) is bad. 2024-11-16T19:28:08,332 WARN [Thread-904 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741840_1022 2024-11-16T19:28:08,332 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:35644 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741840_1022] {}] datanode.BlockReceiver(316): Block 1073741840 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-16T19:28:08,332 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:35644 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:32871:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35644 dst: /127.0.0.1:32871 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:08,332 WARN [Thread-904 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK] 2024-11-16T19:28:08,337 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:08,337 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:08,337 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:08,337 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:08,337 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:08,338 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785284306 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785288315 2024-11-16T19:28:08,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32871 is added to blk_1073741838_1020 (size=3600) 2024-11-16T19:28:08,343 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37021:37021),(127.0.0.1/127.0.0.1:44743:44743)] 2024-11-16T19:28:08,343 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785272823 is not closed yet, will try archiving it next time 2024-11-16T19:28:08,343 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785284306 is not closed yet, will try archiving it next time 2024-11-16T19:28:08,405 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes 
[DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:08,742 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785272823 is not closed yet, will try archiving it next time 2024-11-16T19:28:09,630 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:09,971 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3fa49935[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32871, datanodeUuid=82cc5cca-2971-40d1-8476-8f370f58b096, infoPort=37021, infoSecurePort=0, ipcPort=43569, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598):Failed to transfer BP-1424824963-172.17.0.2-1731785271598:blk_1073741838_1020 to 127.0.0.1:46515 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T19:28:10,331 WARN [ResponseProcessor for block BP-1424824963-172.17.0.2-1731785271598:blk_1073741841_1023 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1424824963-172.17.0.2-1731785271598:blk_1073741841_1023 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:10,332 WARN [DataStreamer for file /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785288315 block BP-1424824963-172.17.0.2-1731785271598:blk_1073741841_1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK], DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK]) is bad. 2024-11-16T19:28:10,332 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:35646 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:32871:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35646 dst: /127.0.0.1:32871 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:10,333 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:41890 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:33499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41890 dst: /127.0.0.1:33499 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T19:28:10,335 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@730725ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:28:10,335 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20e1b523{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:28:10,335 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:28:10,336 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d0f4a9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:28:10,336 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21ddd8f9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/hadoop.log.dir/,STOPPED} 2024-11-16T19:28:10,337 WARN [BP-1424824963-172.17.0.2-1731785271598 heartbeating to localhost/127.0.0.1:41599 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T19:28:10,337 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T19:28:10,338 WARN [BP-1424824963-172.17.0.2-1731785271598 heartbeating to localhost/127.0.0.1:41599 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1424824963-172.17.0.2-1731785271598 (Datanode Uuid 82cc5cca-2971-40d1-8476-8f370f58b096) service to localhost/127.0.0.1:41599 2024-11-16T19:28:10,338 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T19:28:10,338 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data5/current/BP-1424824963-172.17.0.2-1731785271598 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:28:10,339 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data6/current/BP-1424824963-172.17.0.2-1731785271598 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:28:10,339 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T19:28:10,343 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:10,343 WARN [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK]] 2024-11-16T19:28:10,343 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d11ab77873cb%2C36045%2C1731785272299:(num 1731785288315) roll requested 2024-11-16T19:28:10,344 INFO [regionserver/d11ab77873cb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C36045%2C1731785272299.1731785290344 2024-11-16T19:28:10,348 WARN [Thread-918 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44959 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:10,348 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43842 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741842_1025] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data10]'}, localName='127.0.0.1:33499', datanodeUuid='610fb1e1-0ee3-41c4-8b6b-5fcbf217455a', xmitsInProgress=0}:Exception transferring block BP-1424824963-172.17.0.2-1731785271598:blk_1073741842_1025 to mirror 127.0.0.1:44959 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:10,348 WARN [Thread-918 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK], DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK]) is bad. 2024-11-16T19:28:10,348 WARN [Thread-918 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741842_1025 2024-11-16T19:28:10,348 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43842 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741842_1025] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-16T19:28:10,349 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43842 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:33499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43842 dst: /127.0.0.1:33499 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:10,349 WARN [Thread-918 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK] 2024-11-16T19:28:10,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36045 {}] regionserver.HRegion(8855): Flush requested on c8746695b3d38667decb7d27505c93be 2024-11-16T19:28:10,350 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c8746695b3d38667decb7d27505c93be 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T19:28:10,352 WARN [Thread-918 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39343 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:10,353 WARN [Thread-918 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK], DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK]) is bad. 2024-11-16T19:28:10,352 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43858 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741843_1026] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data10]'}, localName='127.0.0.1:33499', datanodeUuid='610fb1e1-0ee3-41c4-8b6b-5fcbf217455a', xmitsInProgress=0}:Exception transferring block BP-1424824963-172.17.0.2-1731785271598:blk_1073741843_1026 to mirror 127.0.0.1:39343 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:10,353 WARN [Thread-918 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741843_1026 2024-11-16T19:28:10,353 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43858 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741843_1026] {}] datanode.BlockReceiver(316): Block 1073741843 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-16T19:28:10,353 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43858 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741843_1026] {}] datanode.DataXceiver(331): 127.0.0.1:33499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43858 dst: /127.0.0.1:33499 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:10,354 WARN [Thread-918 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK] 2024-11-16T19:28:10,355 WARN [Thread-918 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:10,356 WARN [Thread-918 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK], DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK]) is bad. 
2024-11-16T19:28:10,356 WARN [Thread-918 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741844_1027 2024-11-16T19:28:10,357 WARN [Thread-918 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK] 2024-11-16T19:28:10,362 WARN [Thread-918 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46515 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:10,362 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43860 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741845_1028] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data10]'}, localName='127.0.0.1:33499', datanodeUuid='610fb1e1-0ee3-41c4-8b6b-5fcbf217455a', xmitsInProgress=0}:Exception transferring block BP-1424824963-172.17.0.2-1731785271598:blk_1073741845_1028 to mirror 127.0.0.1:46515 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:10,362 WARN [Thread-918 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK], DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]) is bad. 
2024-11-16T19:28:10,362 WARN [Thread-918 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741845_1028 2024-11-16T19:28:10,362 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43860 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741845_1028] {}] datanode.BlockReceiver(316): Block 1073741845 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-16T19:28:10,362 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43860 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741845_1028] {}] datanode.DataXceiver(331): 127.0.0.1:33499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43860 dst: /127.0.0.1:33499 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T19:28:10,363 WARN [Thread-918 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK] 2024-11-16T19:28:10,364 WARN [IPC Server handler 1 on default port 41599 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T19:28:10,365 WARN [IPC Server handler 1 on default port 41599 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T19:28:10,365 WARN [IPC Server handler 1 on default port 41599 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T19:28:10,370 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:10,370 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:10,370 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:10,370 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:10,370 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:10,370 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785288315 with entries=12, filesize=12.96 KB; new WAL /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785290344 2024-11-16T19:28:10,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741841_1024 (size=13274) 2024-11-16T19:28:10,373 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44743:44743)] 2024-11-16T19:28:10,373 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785272823 is not closed yet, will try archiving it next time 2024-11-16T19:28:10,373 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785288315 is not closed yet, will try archiving it next time 2024-11-16T19:28:10,374 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp/info/2a48bf96e2b64359a9416be45c98eb23 is 1080, key is row0002/info:/1731785286323/Put/seqid=0 2024-11-16T19:28:10,375 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:10,376 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK], DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]) is bad. 2024-11-16T19:28:10,376 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741847_1030 2024-11-16T19:28:10,376 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK] 2024-11-16T19:28:10,377 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:10,377 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK], DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK]) is bad. 
2024-11-16T19:28:10,377 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741848_1031 2024-11-16T19:28:10,378 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK] 2024-11-16T19:28:10,379 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:10,379 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK], DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK]) is bad. 2024-11-16T19:28:10,379 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741849_1032 2024-11-16T19:28:10,379 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK] 2024-11-16T19:28:10,382 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39343 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:10,382 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK], DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK]) is bad. 
2024-11-16T19:28:10,382 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43872 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data10]'}, localName='127.0.0.1:33499', datanodeUuid='610fb1e1-0ee3-41c4-8b6b-5fcbf217455a', xmitsInProgress=0}:Exception transferring block BP-1424824963-172.17.0.2-1731785271598:blk_1073741850_1033 to mirror 127.0.0.1:39343 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:10,382 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741850_1033 2024-11-16T19:28:10,382 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43872 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T19:28:10,382 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43872 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:33499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43872 dst: /127.0.0.1:33499 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:10,382 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK] 2024-11-16T19:28:10,383 WARN [IPC Server handler 3 on default port 41599 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T19:28:10,383 WARN [IPC Server handler 3 on default port 41599 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T19:28:10,383 WARN [IPC Server handler 3 on default port 41599 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T19:28:10,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741851_1034 (size=10347) 2024-11-16T19:28:10,405 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T19:28:10,773 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785272823 is not closed yet, will try archiving it next time 2024-11-16T19:28:10,787 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp/info/2a48bf96e2b64359a9416be45c98eb23 2024-11-16T19:28:10,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp/info/2a48bf96e2b64359a9416be45c98eb23 as hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/2a48bf96e2b64359a9416be45c98eb23 2024-11-16T19:28:10,804 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/2a48bf96e2b64359a9416be45c98eb23, entries=5, sequenceid=11, filesize=10.1 K 2024-11-16T19:28:10,805 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for c8746695b3d38667decb7d27505c93be in 455ms, sequenceid=11, compaction requested=false 2024-11-16T19:28:10,805 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c8746695b3d38667decb7d27505c93be: 2024-11-16T19:28:10,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36045 {}] regionserver.HRegion(8855): Flush requested on c8746695b3d38667decb7d27505c93be 2024-11-16T19:28:10,985 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c8746695b3d38667decb7d27505c93be 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-16T19:28:10,993 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp/info/ed85c619ece7495d8e4b3d7d4d6cd859 is 1080, key is row0007/info:/1731785290352/Put/seqid=0 2024-11-16T19:28:10,996 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:10,996 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK], DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK]) is bad. 2024-11-16T19:28:10,996 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741852_1035 2024-11-16T19:28:10,997 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK] 2024-11-16T19:28:10,998 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:10,998 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK], DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK]) is bad. 2024-11-16T19:28:10,998 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741853_1036 2024-11-16T19:28:10,999 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK] 2024-11-16T19:28:11,000 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:11,000 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK], DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]) is bad. 2024-11-16T19:28:11,000 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741854_1037 2024-11-16T19:28:11,000 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK] 2024-11-16T19:28:11,001 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:11,002 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK], DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK]) is bad. 
2024-11-16T19:28:11,002 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741855_1038 2024-11-16T19:28:11,002 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK] 2024-11-16T19:28:11,003 WARN [IPC Server handler 2 on default port 41599 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T19:28:11,003 WARN [IPC Server handler 2 on default port 41599 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T19:28:11,003 WARN [IPC Server handler 2 on default port 41599 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T19:28:11,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741856_1039 (size=12506) 2024-11-16T19:28:11,407 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp/info/ed85c619ece7495d8e4b3d7d4d6cd859 2024-11-16T19:28:11,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp/info/ed85c619ece7495d8e4b3d7d4d6cd859 as hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/ed85c619ece7495d8e4b3d7d4d6cd859 2024-11-16T19:28:11,421 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/ed85c619ece7495d8e4b3d7d4d6cd859, entries=7, sequenceid=24, filesize=12.2 K 2024-11-16T19:28:11,422 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for c8746695b3d38667decb7d27505c93be in 438ms, sequenceid=24, compaction requested=false 2024-11-16T19:28:11,422 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
c8746695b3d38667decb7d27505c93be: 2024-11-16T19:28:11,422 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-16T19:28:11,422 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:28:11,422 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/ed85c619ece7495d8e4b3d7d4d6cd859 because midkey is the same as first or last row 2024-11-16T19:28:11,630 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:12,374 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:12,374 WARN [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK]] 2024-11-16T19:28:12,374 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d11ab77873cb%2C36045%2C1731785272299:(num 1731785290344) roll requested 2024-11-16T19:28:12,374 INFO [regionserver/d11ab77873cb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C36045%2C1731785272299.1731785292374 2024-11-16T19:28:12,377 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:12,377 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK], DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK]) is bad. 2024-11-16T19:28:12,377 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741857_1040 2024-11-16T19:28:12,378 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK] 2024-11-16T19:28:12,381 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46515 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:12,381 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43902 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741858_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data10]'}, localName='127.0.0.1:33499', datanodeUuid='610fb1e1-0ee3-41c4-8b6b-5fcbf217455a', xmitsInProgress=0}:Exception transferring block BP-1424824963-172.17.0.2-1731785271598:blk_1073741858_1041 to mirror 127.0.0.1:46515 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:12,381 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK], DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]) is bad. 2024-11-16T19:28:12,381 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741858_1041 2024-11-16T19:28:12,381 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43902 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741858_1041] {}] datanode.BlockReceiver(316): Block 1073741858 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-16T19:28:12,381 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43902 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741858_1041] {}] datanode.DataXceiver(331): 127.0.0.1:33499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43902 dst: /127.0.0.1:33499 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:12,382 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK] 2024-11-16T19:28:12,384 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39343 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:12,384 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43908 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data10]'}, localName='127.0.0.1:33499', datanodeUuid='610fb1e1-0ee3-41c4-8b6b-5fcbf217455a', xmitsInProgress=0}:Exception transferring block BP-1424824963-172.17.0.2-1731785271598:blk_1073741859_1042 to mirror 127.0.0.1:39343 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:12,384 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK], DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK]) is bad. 2024-11-16T19:28:12,384 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741859_1042 2024-11-16T19:28:12,384 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43908 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-16T19:28:12,384 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43908 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:33499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43908 dst: /127.0.0.1:33499 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:12,385 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK] 2024-11-16T19:28:12,386 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:12,387 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK], DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK]) is bad. 
2024-11-16T19:28:12,387 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741860_1043 2024-11-16T19:28:12,387 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK] 2024-11-16T19:28:12,388 WARN [IPC Server handler 1 on default port 41599 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T19:28:12,388 WARN [IPC Server handler 1 on default port 41599 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T19:28:12,388 WARN [IPC Server handler 1 on default port 41599 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T19:28:12,391 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:12,391 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:12,391 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:12,392 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:12,392 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:12,392 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785290344 with entries=12, filesize=11.36 KB; new WAL /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785292374 2024-11-16T19:28:12,393 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44743:44743)] 2024-11-16T19:28:12,393 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785272823 is not closed yet, will try archiving it next time 2024-11-16T19:28:12,393 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785290344 is not closed yet, will try archiving it next time 2024-11-16T19:28:12,394 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785284306 to hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/oldWALs/d11ab77873cb%2C36045%2C1731785272299.1731785284306 2024-11-16T19:28:12,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741846_1029 (size=11642) 2024-11-16T19:28:12,395 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785288315 to hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/oldWALs/d11ab77873cb%2C36045%2C1731785272299.1731785288315 2024-11-16T19:28:12,406 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:12,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36045 {}] regionserver.HRegion(8855): Flush requested on c8746695b3d38667decb7d27505c93be 2024-11-16T19:28:12,411 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c8746695b3d38667decb7d27505c93be 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T19:28:12,415 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp/info/65c43295823440dbba867bde6886a62b is 1079, key is tmprow/info:/1731785292410/Put/seqid=0 2024-11-16T19:28:12,418 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32871 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T19:28:12,418 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43924 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741862_1045] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data10]'}, localName='127.0.0.1:33499', datanodeUuid='610fb1e1-0ee3-41c4-8b6b-5fcbf217455a', xmitsInProgress=0}:Exception transferring block BP-1424824963-172.17.0.2-1731785271598:blk_1073741862_1045 to mirror 127.0.0.1:32871 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:12,418 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK], DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK]) is bad. 2024-11-16T19:28:12,418 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741862_1045 2024-11-16T19:28:12,418 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43924 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741862_1045] {}] datanode.BlockReceiver(316): Block 1073741862 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T19:28:12,418 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43924 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741862_1045] {}] datanode.DataXceiver(331): 127.0.0.1:33499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43924 dst: /127.0.0.1:33499 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:12,418 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK] 2024-11-16T19:28:12,419 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:12,420 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK], DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK]) is bad. 2024-11-16T19:28:12,420 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741863_1046 2024-11-16T19:28:12,420 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK] 2024-11-16T19:28:12,421 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:12,421 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK], DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK]) is bad. 2024-11-16T19:28:12,421 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741864_1047 2024-11-16T19:28:12,422 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK] 2024-11-16T19:28:12,423 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46515 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:12,423 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43928 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data10]'}, localName='127.0.0.1:33499', datanodeUuid='610fb1e1-0ee3-41c4-8b6b-5fcbf217455a', xmitsInProgress=0}:Exception transferring block BP-1424824963-172.17.0.2-1731785271598:blk_1073741865_1048 to mirror 127.0.0.1:46515 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:12,424 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK], DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]) is bad. 2024-11-16T19:28:12,424 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741865_1048 2024-11-16T19:28:12,424 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43928 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T19:28:12,424 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43928 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:33499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43928 dst: /127.0.0.1:33499 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T19:28:12,424 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK] 2024-11-16T19:28:12,425 WARN [IPC Server handler 0 on default port 41599 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T19:28:12,425 WARN [IPC Server handler 0 on default port 41599 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T19:28:12,425 WARN [IPC Server handler 0 on default port 41599 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T19:28:12,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741866_1049 (size=6027) 2024-11-16T19:28:12,795 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785272823 is not closed yet, will try archiving it next time 2024-11-16T19:28:12,829 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp/info/65c43295823440dbba867bde6886a62b 2024-11-16T19:28:12,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp/info/65c43295823440dbba867bde6886a62b as hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/65c43295823440dbba867bde6886a62b 2024-11-16T19:28:12,841 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/65c43295823440dbba867bde6886a62b, entries=1, sequenceid=34, filesize=5.9 K 2024-11-16T19:28:12,842 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for c8746695b3d38667decb7d27505c93be in 431ms, 
sequenceid=34, compaction requested=true 2024-11-16T19:28:12,842 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c8746695b3d38667decb7d27505c93be: 2024-11-16T19:28:12,843 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-16T19:28:12,843 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:28:12,843 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/ed85c619ece7495d8e4b3d7d4d6cd859 because midkey is the same as first or last row 2024-11-16T19:28:12,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8746695b3d38667decb7d27505c93be:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T19:28:12,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:28:12,843 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T19:28:12,845 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T19:28:12,845 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.HStore(1541): c8746695b3d38667decb7d27505c93be/info is initiating minor compaction (all files) 2024-11-16T19:28:12,845 INFO [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c8746695b3d38667decb7d27505c93be/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be. 
2024-11-16T19:28:12,845 INFO [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/2a48bf96e2b64359a9416be45c98eb23, hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/ed85c619ece7495d8e4b3d7d4d6cd859, hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/65c43295823440dbba867bde6886a62b] into tmpdir=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp, totalSize=28.2 K 2024-11-16T19:28:12,846 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2a48bf96e2b64359a9416be45c98eb23, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731785286323 2024-11-16T19:28:12,846 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] compactions.Compactor(225): Compacting ed85c619ece7495d8e4b3d7d4d6cd859, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731785290352 2024-11-16T19:28:12,847 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] compactions.Compactor(225): Compacting 65c43295823440dbba867bde6886a62b, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731785292410 2024-11-16T19:28:12,864 INFO [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8746695b3d38667decb7d27505c93be#info#compaction#21 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T19:28:12,865 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp/info/fb602438c6d848c7a3c6b00eb9f38e78 is 1080, key is row0002/info:/1731785286323/Put/seqid=0 2024-11-16T19:28:12,868 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39343 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T19:28:12,868 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43972 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741867_1050] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data10]'}, localName='127.0.0.1:33499', datanodeUuid='610fb1e1-0ee3-41c4-8b6b-5fcbf217455a', xmitsInProgress=0}:Exception transferring block BP-1424824963-172.17.0.2-1731785271598:blk_1073741867_1050 to mirror 127.0.0.1:39343 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:12,868 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK], DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK]) is bad. 2024-11-16T19:28:12,868 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741867_1050 2024-11-16T19:28:12,868 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43972 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741867_1050] {}] datanode.BlockReceiver(316): Block 1073741867 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T19:28:12,868 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43972 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741867_1050] {}] datanode.DataXceiver(331): 127.0.0.1:33499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43972 dst: /127.0.0.1:33499 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:12,869 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK] 2024-11-16T19:28:12,871 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44959 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:12,871 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43976 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741868_1051] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data10]'}, localName='127.0.0.1:33499', datanodeUuid='610fb1e1-0ee3-41c4-8b6b-5fcbf217455a', xmitsInProgress=0}:Exception transferring block BP-1424824963-172.17.0.2-1731785271598:blk_1073741868_1051 to mirror 127.0.0.1:44959 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T19:28:12,872 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK], DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK]) is bad. 2024-11-16T19:28:12,872 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741868_1051 2024-11-16T19:28:12,872 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43976 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741868_1051] {}] datanode.BlockReceiver(316): Block 1073741868 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T19:28:12,872 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:43976 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741868_1051] {}] datanode.DataXceiver(331): 127.0.0.1:33499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43976 dst: /127.0.0.1:33499 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:12,872 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK] 2024-11-16T19:28:12,874 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T19:28:12,874 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK], DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]) is bad. 2024-11-16T19:28:12,874 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741869_1052 2024-11-16T19:28:12,875 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK] 2024-11-16T19:28:12,876 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:12,876 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK], DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK]) is bad. 
2024-11-16T19:28:12,876 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741870_1053 2024-11-16T19:28:12,877 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK] 2024-11-16T19:28:12,878 WARN [IPC Server handler 4 on default port 41599 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T19:28:12,878 WARN [IPC Server handler 4 on default port 41599 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T19:28:12,878 WARN [IPC Server handler 4 on default port 41599 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T19:28:12,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741871_1054 (size=17994) 2024-11-16T19:28:13,240 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@15e85e4c[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33499, datanodeUuid=610fb1e1-0ee3-41c4-8b6b-5fcbf217455a, infoPort=44743, infoSecurePort=0, ipcPort=44543, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598):Failed to transfer BP-1424824963-172.17.0.2-1731785271598:blk_1073741841_1024 to 127.0.0.1:44959 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T19:28:13,240 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1b2fb281[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33499, datanodeUuid=610fb1e1-0ee3-41c4-8b6b-5fcbf217455a, infoPort=44743, infoSecurePort=0, ipcPort=44543, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598):Failed to transfer BP-1424824963-172.17.0.2-1731785271598:blk_1073741851_1034 to 127.0.0.1:46515 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:13,290 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp/info/fb602438c6d848c7a3c6b00eb9f38e78 as hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/fb602438c6d848c7a3c6b00eb9f38e78 2024-11-16T19:28:13,300 INFO [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c8746695b3d38667decb7d27505c93be/info of c8746695b3d38667decb7d27505c93be into fb602438c6d848c7a3c6b00eb9f38e78(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-16T19:28:13,300 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c8746695b3d38667decb7d27505c93be: 2024-11-16T19:28:13,300 INFO [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be., storeName=c8746695b3d38667decb7d27505c93be/info, priority=13, startTime=1731785292843; duration=0sec 2024-11-16T19:28:13,300 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-16T19:28:13,300 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:28:13,301 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/fb602438c6d848c7a3c6b00eb9f38e78 because midkey is the same as first or last row 2024-11-16T19:28:13,301 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-16T19:28:13,301 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:28:13,301 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/fb602438c6d848c7a3c6b00eb9f38e78 because midkey is the same as first or last row 2024-11-16T19:28:13,301 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-16T19:28:13,301 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:28:13,301 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/fb602438c6d848c7a3c6b00eb9f38e78 because midkey is the same as first or last row 2024-11-16T19:28:13,301 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:28:13,301 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8746695b3d38667decb7d27505c93be:info 2024-11-16T19:28:13,631 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:13,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36045 {}] regionserver.HRegion(8855): Flush requested on c8746695b3d38667decb7d27505c93be 2024-11-16T19:28:13,847 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c8746695b3d38667decb7d27505c93be 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T19:28:13,856 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp/info/5adecd86e3854862b946f3bf93ed5b83 is 1079, key is tmprow/info:/1731785293846/Put/seqid=0 2024-11-16T19:28:13,859 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:13,859 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK], DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]) is bad. 2024-11-16T19:28:13,859 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741872_1055 2024-11-16T19:28:13,860 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK] 2024-11-16T19:28:13,861 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:13,861 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK], DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK]) is bad. 2024-11-16T19:28:13,861 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741873_1056 2024-11-16T19:28:13,862 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK] 2024-11-16T19:28:13,863 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:13,863 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK], DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK]) is bad. 2024-11-16T19:28:13,863 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741874_1057 2024-11-16T19:28:13,864 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44959,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK] 2024-11-16T19:28:13,865 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:13,865 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK], DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK]) is bad. 2024-11-16T19:28:13,865 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741875_1058 2024-11-16T19:28:13,865 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK] 2024-11-16T19:28:13,866 WARN [IPC Server handler 2 on default port 41599 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T19:28:13,866 WARN [IPC Server handler 2 on default port 41599 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T19:28:13,866 WARN [IPC Server handler 2 on default port 41599 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T19:28:13,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741876_1059 (size=6027) 2024-11-16T19:28:14,240 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@15e85e4c[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33499, datanodeUuid=610fb1e1-0ee3-41c4-8b6b-5fcbf217455a, infoPort=44743, infoSecurePort=0, ipcPort=44543, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598):Failed to transfer BP-1424824963-172.17.0.2-1731785271598:blk_1073741846_1029 to 127.0.0.1:44959 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:14,240 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1b2fb281[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33499, datanodeUuid=610fb1e1-0ee3-41c4-8b6b-5fcbf217455a, infoPort=44743, infoSecurePort=0, ipcPort=44543, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598):Failed to transfer BP-1424824963-172.17.0.2-1731785271598:blk_1073741856_1039 to 127.0.0.1:32871 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T19:28:14,270 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp/info/5adecd86e3854862b946f3bf93ed5b83 2024-11-16T19:28:14,281 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp/info/5adecd86e3854862b946f3bf93ed5b83 as hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/5adecd86e3854862b946f3bf93ed5b83 2024-11-16T19:28:14,286 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/5adecd86e3854862b946f3bf93ed5b83, entries=1, sequenceid=45, filesize=5.9 K 2024-11-16T19:28:14,287 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for c8746695b3d38667decb7d27505c93be in 440ms, sequenceid=45, compaction requested=false 2024-11-16T19:28:14,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c8746695b3d38667decb7d27505c93be: 2024-11-16T19:28:14,287 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-16T19:28:14,288 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:28:14,288 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/fb602438c6d848c7a3c6b00eb9f38e78 because midkey is the same as first or last row 2024-11-16T19:28:14,394 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:14,394 WARN [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 
2024-11-16T19:28:14,406 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:14,469 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:28:14,472 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:28:14,473 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:28:14,473 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:28:14,473 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T19:28:14,476 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66e63d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:28:14,476 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7d04364e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:28:14,570 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@633469fc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/java.io.tmpdir/jetty-localhost-39905-hadoop-hdfs-3_4_1-tests_jar-_-any-8476270256738666555/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:28:14,571 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@28f4e296{HTTP/1.1, (http/1.1)}{localhost:39905} 2024-11-16T19:28:14,571 INFO [Time-limited test {}] server.Server(415): Started @121225ms 2024-11-16T19:28:14,572 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T19:28:14,641 WARN [Thread-977 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T19:28:14,648 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2a748a60321411a7 with lease ID 0xf34050bba09898ec: from storage DS-e9ee80de-25e2-4782-abd9-24b5fa243d58 node DatanodeRegistration(127.0.0.1:37617, datanodeUuid=b65ae952-8681-4d98-9cea-83182561fe12, infoPort=40045, infoSecurePort=0, ipcPort=42173, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T19:28:14,648 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2a748a60321411a7 with lease ID 0xf34050bba09898ec: from storage DS-837ddefb-7f5a-4437-8ee1-23cf6bb243b8 node DatanodeRegistration(127.0.0.1:37617, datanodeUuid=b65ae952-8681-4d98-9cea-83182561fe12, infoPort=40045, infoSecurePort=0, ipcPort=42173, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:28:15,632 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:16,244 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@15e85e4c[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33499, datanodeUuid=610fb1e1-0ee3-41c4-8b6b-5fcbf217455a, infoPort=44743, infoSecurePort=0, ipcPort=44543, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598):Failed to transfer BP-1424824963-172.17.0.2-1731785271598:blk_1073741871_1054 to 127.0.0.1:39343 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T19:28:16,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37617 is added to blk_1073741866_1049 (size=6027) 2024-11-16T19:28:16,394 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:16,407 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:17,240 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1b2fb281[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33499, datanodeUuid=610fb1e1-0ee3-41c4-8b6b-5fcbf217455a, infoPort=44743, infoSecurePort=0, ipcPort=44543, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598):Failed to transfer BP-1424824963-172.17.0.2-1731785271598:blk_1073741876_1059 to 127.0.0.1:39343 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:17,633 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:18,395 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:18,407 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:19,634 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:20,395 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:20,407 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:21,634 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:22,214 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T19:28:22,396 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:22,408 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:22,539 ERROR [FSHLog-0-hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData-prefix:d11ab77873cb,42783,1731785272235 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:22,539 WARN [FSHLog-0-hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData-prefix:d11ab77873cb,42783,1731785272235 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:22,540 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog d11ab77873cb%2C42783%2C1731785272235:(num 1731785272443) roll requested 2024-11-16T19:28:22,540 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C42783%2C1731785272235.1731785302540 2024-11-16T19:28:22,545 WARN [Thread-998 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
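The sequence above shows the master-store WAL reacting to a dead write pipeline: appendAndSync throws, the roller requests a roll, and a new writer is opened (its pipeline setup continues just below). The sketch below captures only that retry-by-rolling shape; WalWriter and WriterFactory are invented here, and HBase's real FSHLog/AbstractFSWAL classes are far more involved.

import java.io.IOException;

/** Minimal sketch of the "roll the WAL when append/sync fails" pattern above. */
public final class RollOnFailureWal {

    interface WalWriter extends AutoCloseable {
        void append(byte[] entry) throws IOException;
        void sync() throws IOException;
        @Override void close() throws IOException;
    }

    interface WriterFactory {
        WalWriter newWriter() throws IOException; // opens a file on a fresh pipeline
    }

    private final WriterFactory factory;
    private WalWriter current;

    RollOnFailureWal(WriterFactory factory) throws IOException {
        this.factory = factory;
        this.current = factory.newWriter();
    }

    /** Appends and syncs one entry, rolling to a new writer if the pipeline is bad. */
    void appendAndSync(byte[] entry) throws IOException {
        try {
            current.append(entry);
            current.sync();
        } catch (IOException pipelineFailure) {
            // Mirrors the log above: closing the old writer is best-effort
            // ("close old writer failed" is tolerated) before a new one is opened.
            try {
                current.close();
            } catch (IOException ignored) {
                // non-fatal, continuing...
            }
            current = factory.newWriter();
            current.append(entry);
            current.sync();
        }
    }
}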
2024-11-16T19:28:22,545 WARN [Thread-998 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK], DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK]) is bad. 2024-11-16T19:28:22,546 WARN [Thread-998 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741877_1060 2024-11-16T19:28:22,547 WARN [Thread-998 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK] 2024-11-16T19:28:22,549 WARN [Thread-998 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:22,549 WARN [Thread-998 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK], DatanodeInfoWithStorage[127.0.0.1:37617,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK]) is bad. 2024-11-16T19:28:22,549 WARN [Thread-998 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741878_1061 2024-11-16T19:28:22,550 WARN [Thread-998 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39343,DS-61da5b1f-894e-4c9a-915d-cea845e6a001,DISK] 2024-11-16T19:28:22,557 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:22,557 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:22,557 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:22,558 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:22,558 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:22,558 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/WALs/d11ab77873cb,42783,1731785272235/d11ab77873cb%2C42783%2C1731785272235.1731785272443 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/WALs/d11ab77873cb,42783,1731785272235/d11ab77873cb%2C42783%2C1731785272235.1731785302540 2024-11-16T19:28:22,559 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:22,559 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:22,559 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/WALs/d11ab77873cb,42783,1731785272235/d11ab77873cb%2C42783%2C1731785272235.1731785272443 2024-11-16T19:28:22,559 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44743:44743),(127.0.0.1/127.0.0.1:40045:40045)] 2024-11-16T19:28:22,560 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/WALs/d11ab77873cb,42783,1731785272235/d11ab77873cb%2C42783%2C1731785272235.1731785272443 is not closed yet, will try archiving it next time 2024-11-16T19:28:22,560 WARN [IPC Server handler 0 on default port 41599 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/WALs/d11ab77873cb,42783,1731785272235/d11ab77873cb%2C42783%2C1731785272235.1731785272443 has not been closed. Lease recovery is in progress. RecoveryId = 1063 for block blk_1073741830_1006 2024-11-16T19:28:22,560 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/WALs/d11ab77873cb,42783,1731785272235/d11ab77873cb%2C42783%2C1731785272235.1731785272443 after 1ms 2024-11-16T19:28:23,635 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:24,396 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:24,664 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@45081e38 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1424824963-172.17.0.2-1731785271598:blk_1073741832_1008, datanode=DatanodeInfoWithStorage[127.0.0.1:46515,null,null]) java.net.ConnectException: Call From d11ab77873cb/172.17.0.2 to localhost:33759 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-16T19:28:24,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37617 is added to blk_1073741832_1019 (size=455) 2024-11-16T19:28:25,341 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785272823 to hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/oldWALs/d11ab77873cb%2C36045%2C1731785272299.1731785272823 2024-11-16T19:28:25,342 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785290344 to hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/oldWALs/d11ab77873cb%2C36045%2C1731785272299.1731785290344 2024-11-16T19:28:25,635 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
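Around this point the test is also trying to reclaim the abandoned master WAL file: RecoverLeaseFSUtils logs a failed attempt=0 above and retries a few seconds later (attempt=1 below). The gist is a bounded retry loop around HDFS lease recovery. The sketch below calls DistributedFileSystem.recoverLease directly, with illustrative retry counts and pauses rather than HBase's actual settings; the real utility is more elaborate.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/** Sketch of a lease-recovery retry loop in the spirit of the attempts logged above. */
public final class LeaseRecoveryExample {

    /** Returns true once HDFS reports the file's lease as recovered (file closed). */
    static boolean recoverLease(DistributedFileSystem dfs, Path walFile,
                                int maxAttempts, long pauseMs) throws Exception {
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            // recoverLease() returns true when the file is closed and safe to read.
            if (dfs.recoverLease(walFile)) {
                return true;
            }
            System.out.println("Failed to recover lease, attempt=" + attempt
                + " on file=" + walFile);
            Thread.sleep(pauseMs);
        }
        return false;
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes fs.defaultFS points at an HDFS namenode, as in the test above.
        FileSystem fs = FileSystem.get(conf);
        if (fs instanceof DistributedFileSystem) {
            Path wal = new Path(args[0]); // path of the abandoned WAL file
            recoverLease((DistributedFileSystem) fs, wal, 5, 4000L);
        }
    }
}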
2024-11-16T19:28:25,645 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@74073c2f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37617, datanodeUuid=b65ae952-8681-4d98-9cea-83182561fe12, infoPort=40045, infoSecurePort=0, ipcPort=42173, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598):Failed to transfer BP-1424824963-172.17.0.2-1731785271598:blk_1073741832_1019 to 127.0.0.1:46515 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:26,397 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:26,562 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/WALs/d11ab77873cb,42783,1731785272235/d11ab77873cb%2C42783%2C1731785272235.1731785272443 after 4002ms 2024-11-16T19:28:27,636 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:28,397 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:28,645 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@74073c2f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37617, datanodeUuid=b65ae952-8681-4d98-9cea-83182561fe12, infoPort=40045, infoSecurePort=0, ipcPort=42173, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598):Failed to transfer BP-1424824963-172.17.0.2-1731785271598:blk_1073741835_1011 to 127.0.0.1:32871 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:28,645 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@20dbd28a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37617, datanodeUuid=b65ae952-8681-4d98-9cea-83182561fe12, infoPort=40045, infoSecurePort=0, ipcPort=42173, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598):Failed to transfer BP-1424824963-172.17.0.2-1731785271598:blk_1073741833_1009 to 127.0.0.1:32871 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:29,636 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:29,646 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@74073c2f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37617, datanodeUuid=b65ae952-8681-4d98-9cea-83182561fe12, infoPort=40045, infoSecurePort=0, ipcPort=42173, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598):Failed to transfer BP-1424824963-172.17.0.2-1731785271598:blk_1073741829_1005 to 127.0.0.1:32871 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T19:28:29,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741831_1007 (size=1321) 2024-11-16T19:28:29,964 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C36045%2C1731785272299.1731785309964 2024-11-16T19:28:29,970 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:29,971 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:29,971 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:29,971 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:29,971 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:29,971 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785292374 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785309964 2024-11-16T19:28:29,973 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40045:40045),(127.0.0.1/127.0.0.1:44743:44743)] 2024-11-16T19:28:29,973 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.1731785292374 is not closed yet, will try archiving it next time 2024-11-16T19:28:29,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741861_1044 (size=13591) 2024-11-16T19:28:29,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36045 {}] regionserver.HRegion(8855): Flush requested on c8746695b3d38667decb7d27505c93be 2024-11-16T19:28:29,987 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c8746695b3d38667decb7d27505c93be 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T19:28:29,994 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp/info/fb5ef768a39742c4883ca8111dfbfe77 is 1080, key is row0013/info:/1731785309975/Put/seqid=0 2024-11-16T19:28:30,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37617 is added to blk_1073741881_1065 (size=11421) 2024-11-16T19:28:30,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741881_1065 (size=11421) 2024-11-16T19:28:30,004 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp/info/fb5ef768a39742c4883ca8111dfbfe77 2024-11-16T19:28:30,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp/info/fb5ef768a39742c4883ca8111dfbfe77 as hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/fb5ef768a39742c4883ca8111dfbfe77 2024-11-16T19:28:30,022 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/fb5ef768a39742c4883ca8111dfbfe77, entries=6, sequenceid=55, filesize=11.2 K 2024-11-16T19:28:30,023 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for c8746695b3d38667decb7d27505c93be in 36ms, sequenceid=55, compaction requested=true 2024-11-16T19:28:30,024 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c8746695b3d38667decb7d27505c93be: 2024-11-16T19:28:30,024 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-11-16T19:28:30,024 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:28:30,024 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/fb602438c6d848c7a3c6b00eb9f38e78 because midkey is the same as first or last row 2024-11-16T19:28:30,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8746695b3d38667decb7d27505c93be:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T19:28:30,024 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T19:28:30,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:28:30,025 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T19:28:30,025 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.HStore(1541): c8746695b3d38667decb7d27505c93be/info is initiating minor compaction (all files) 2024-11-16T19:28:30,026 INFO [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c8746695b3d38667decb7d27505c93be/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be. 
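The flush above is followed by split-policy checks: the store is big enough (sumSize=34.6 K against sizeToCheck=16.0 K), but the region still cannot be split because the chosen midkey equals the first or last row, since splitting there would leave one daughter region effectively empty. Below is a toy version of that decision, not HBase's actual ConstantSizeRegionSplitPolicy, assuming byte[] row keys; the minor compaction that was just requested continues after it.

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

/** Illustration of the split check reflected in the log lines above. */
public final class SplitCheckExample {

    static boolean shouldSplit(long storeSizeBytes, long sizeToCheckBytes,
                               byte[] midKey, byte[] firstKey, byte[] lastKey) {
        if (storeSizeBytes <= sizeToCheckBytes) {
            return false; // region not big enough yet
        }
        if (midKey == null
                || Arrays.equals(midKey, firstKey)
                || Arrays.equals(midKey, lastKey)) {
            return false; // "midkey is the same as first or last row"
        }
        return true;
    }

    public static void main(String[] args) {
        byte[] first = "row0002".getBytes(StandardCharsets.UTF_8);
        byte[] last = "row0018".getBytes(StandardCharsets.UTF_8);
        // Roughly the 34.6 K store vs. the 16.0 K check from the log above.
        System.out.println(shouldSplit(35_442L, 16_384L, first, first, last)); // false
        System.out.println(shouldSplit(35_442L, 16_384L,
            "row0010".getBytes(StandardCharsets.UTF_8), first, last));         // true
    }
}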
2024-11-16T19:28:30,026 INFO [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/fb602438c6d848c7a3c6b00eb9f38e78, hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/5adecd86e3854862b946f3bf93ed5b83, hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/fb5ef768a39742c4883ca8111dfbfe77] into tmpdir=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp, totalSize=34.6 K 2024-11-16T19:28:30,026 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] compactions.Compactor(225): Compacting fb602438c6d848c7a3c6b00eb9f38e78, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731785286323 2024-11-16T19:28:30,026 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5adecd86e3854862b946f3bf93ed5b83, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731785293846 2024-11-16T19:28:30,027 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] compactions.Compactor(225): Compacting fb5ef768a39742c4883ca8111dfbfe77, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731785294253 2024-11-16T19:28:30,043 INFO [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8746695b3d38667decb7d27505c93be#info#compaction#24 average throughput is 8.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T19:28:30,044 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp/info/3ad1b5dc13624582be381a735c0102b0 is 1080, key is row0002/info:/1731785286323/Put/seqid=0 2024-11-16T19:28:30,046 WARN [Thread-1026 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T19:28:30,046 WARN [Thread-1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741882_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK], DatanodeInfoWithStorage[127.0.0.1:37617,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK]) is bad. 2024-11-16T19:28:30,046 WARN [Thread-1026 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741882_1066 2024-11-16T19:28:30,046 WARN [Thread-1026 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK] 2024-11-16T19:28:30,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37617 is added to blk_1073741883_1067 (size=23502) 2024-11-16T19:28:30,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741883_1067 (size=23502) 2024-11-16T19:28:30,057 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp/info/3ad1b5dc13624582be381a735c0102b0 as hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/3ad1b5dc13624582be381a735c0102b0 2024-11-16T19:28:30,063 INFO [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c8746695b3d38667decb7d27505c93be/info of c8746695b3d38667decb7d27505c93be into 3ad1b5dc13624582be381a735c0102b0(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
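The compaction that just completed above merged three store files of roughly 17.6 K, 5.9 K and 11.2 K into a single 23.0 K file. HBase's ExploringCompactionPolicy weighs permutations of eligible files against a size ratio (the "1 permutations with 1 in ratio" line above); the sketch below is a deliberately simplified stand-in that just picks the longest contiguous run of files under a total-size cap, with made-up parameter values.

import java.util.ArrayList;
import java.util.List;

/** Simplified illustration of minor-compaction file selection. */
public final class CompactionSelectionExample {

    static List<Integer> select(long[] fileSizes, int minFiles, long maxTotalBytes) {
        List<Integer> best = new ArrayList<>();
        for (int start = 0; start < fileSizes.length; start++) {
            long total = 0;
            List<Integer> candidate = new ArrayList<>();
            for (int i = start; i < fileSizes.length; i++) {
                if (total + fileSizes[i] > maxTotalBytes) {
                    break;
                }
                total += fileSizes[i];
                candidate.add(i);
            }
            if (candidate.size() >= minFiles && candidate.size() > best.size()) {
                best = candidate;
            }
        }
        return best; // indexes of the store files to compact; empty if nothing qualifies
    }

    public static void main(String[] args) {
        // Roughly the three file sizes from the log above (17.6 K, 5.9 K, 11.2 K).
        long[] sizes = {18_022L, 6_027L, 11_421L};
        System.out.println(select(sizes, 3, 128L * 1024L)); // [0, 1, 2]
    }
}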
2024-11-16T19:28:30,063 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c8746695b3d38667decb7d27505c93be: 2024-11-16T19:28:30,063 INFO [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be., storeName=c8746695b3d38667decb7d27505c93be/info, priority=13, startTime=1731785310024; duration=0sec 2024-11-16T19:28:30,064 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-16T19:28:30,064 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:28:30,064 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/3ad1b5dc13624582be381a735c0102b0 because midkey is the same as first or last row 2024-11-16T19:28:30,064 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-16T19:28:30,064 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:28:30,064 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/3ad1b5dc13624582be381a735c0102b0 because midkey is the same as first or last row 2024-11-16T19:28:30,064 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-16T19:28:30,064 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:28:30,064 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/3ad1b5dc13624582be381a735c0102b0 because midkey is the same as first or last row 2024-11-16T19:28:30,064 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:28:30,064 DEBUG [RS:0;d11ab77873cb:36045-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8746695b3d38667decb7d27505c93be:info 2024-11-16T19:28:30,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36045 {}] regionserver.HRegion(8855): Flush requested on c8746695b3d38667decb7d27505c93be 2024-11-16T19:28:30,213 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c8746695b3d38667decb7d27505c93be 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T19:28:30,221 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp/info/4bde742acd7b456f9e242f099b59ec28 is 1080, key is row0018/info:/1731785309990/Put/seqid=0 2024-11-16T19:28:30,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741884_1068 (size=11421) 2024-11-16T19:28:30,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37617 is added to blk_1073741884_1068 (size=11421) 2024-11-16T19:28:30,231 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp/info/4bde742acd7b456f9e242f099b59ec28 2024-11-16T19:28:30,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/.tmp/info/4bde742acd7b456f9e242f099b59ec28 as hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/4bde742acd7b456f9e242f099b59ec28 2024-11-16T19:28:30,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/4bde742acd7b456f9e242f099b59ec28, entries=6, sequenceid=66, filesize=11.2 K 2024-11-16T19:28:30,243 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=0 B/0 for c8746695b3d38667decb7d27505c93be in 31ms, sequenceid=66, compaction requested=false 2024-11-16T19:28:30,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c8746695b3d38667decb7d27505c93be: 2024-11-16T19:28:30,244 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.1 K, sizeToCheck=16.0 K 2024-11-16T19:28:30,244 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:28:30,244 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/3ad1b5dc13624582be381a735c0102b0 because midkey is the same as first or last row 2024-11-16T19:28:30,398 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:30,398 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-16T19:28:30,414 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T19:28:30,414 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T19:28:30,415 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T19:28:30,415 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:28:30,415 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:28:30,416 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T19:28:30,416 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T19:28:30,416 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1001612558, stopped=false 2024-11-16T19:28:30,417 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d11ab77873cb,42783,1731785272235 2024-11-16T19:28:30,420 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36045-0x1004a0188070001, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T19:28:30,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45785-0x1004a0188070002, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T19:28:30,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T19:28:30,420 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36045-0x1004a0188070001, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:28:30,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45785-0x1004a0188070002, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:28:30,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:28:30,421 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T19:28:30,421 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
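The shutdown above is driven by ZooKeeper: deleting /hbase/running produces NodeDeleted events, and each watcher then re-registers on the now-missing znode, as the "Set watcher on znode that does not yet exist" lines just below show. A small, hypothetical watcher built on the plain ZooKeeper client shows the same pattern; the connect string reuses the quorum address from this log and the class name and timing values are placeholders.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

/** Sketch of watching a "cluster up" znode such as /hbase/running. */
public final class RunningZNodeWatcher implements Watcher {

    private final ZooKeeper zk;
    private final String path;

    RunningZNodeWatcher(String connectString, String path) throws Exception {
        this.path = path;
        this.zk = new ZooKeeper(connectString, 30_000, this);
        // exists() registers the watch whether or not the znode currently exists.
        this.zk.exists(path, true);
    }

    @Override
    public void process(WatchedEvent event) {
        if (path.equals(event.getPath())
                && event.getType() == Watcher.Event.EventType.NodeDeleted) {
            System.out.println("Cluster shutdown requested: " + path + " was deleted");
        }
        try {
            zk.exists(path, true); // one-shot watches must be re-registered
        } catch (Exception ignored) {
            // connection not ready yet or already closed; nothing more to watch
        }
    }

    public static void main(String[] args) throws Exception {
        new RunningZNodeWatcher("127.0.0.1:59815", "/hbase/running");
        Thread.sleep(60_000); // keep the process alive long enough to observe events
    }
}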
2024-11-16T19:28:30,422 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45785-0x1004a0188070002, quorum=127.0.0.1:59815, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:28:30,422 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36045-0x1004a0188070001, quorum=127.0.0.1:59815, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:28:30,423 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:28:30,423 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at 
org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T19:28:30,423 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:28:30,423 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd11ab77873cb,36045,1731785272299' ***** 2024-11-16T19:28:30,424 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T19:28:30,424 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd11ab77873cb,45785,1731785273562' ***** 2024-11-16T19:28:30,424 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T19:28:30,424 INFO [RS:0;d11ab77873cb:36045 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T19:28:30,424 INFO [RS:1;d11ab77873cb:45785 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T19:28:30,424 INFO [RS:0;d11ab77873cb:36045 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T19:28:30,424 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T19:28:30,424 INFO [RS:0;d11ab77873cb:36045 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T19:28:30,424 INFO [RS:1;d11ab77873cb:45785 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T19:28:30,424 INFO [RS:1;d11ab77873cb:45785 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T19:28:30,424 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T19:28:30,424 INFO [RS:0;d11ab77873cb:36045 {}] regionserver.HRegionServer(3091): Received CLOSE for c8746695b3d38667decb7d27505c93be 2024-11-16T19:28:30,425 INFO [RS:1;d11ab77873cb:45785 {}] regionserver.HRegionServer(959): stopping server d11ab77873cb,45785,1731785273562 2024-11-16T19:28:30,425 INFO [RS:1;d11ab77873cb:45785 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T19:28:30,425 INFO [RS:1;d11ab77873cb:45785 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;d11ab77873cb:45785. 
2024-11-16T19:28:30,425 INFO [RS:0;d11ab77873cb:36045 {}] regionserver.HRegionServer(959): stopping server d11ab77873cb,36045,1731785272299 2024-11-16T19:28:30,425 INFO [RS:0;d11ab77873cb:36045 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T19:28:30,425 DEBUG [RS:1;d11ab77873cb:45785 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T19:28:30,425 DEBUG [RS:1;d11ab77873cb:45785 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:28:30,425 INFO [RS:0;d11ab77873cb:36045 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d11ab77873cb:36045. 2024-11-16T19:28:30,425 DEBUG [RS:0;d11ab77873cb:36045 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T19:28:30,425 INFO [RS:1;d11ab77873cb:45785 {}] regionserver.HRegionServer(976): stopping server d11ab77873cb,45785,1731785273562; all regions closed. 
2024-11-16T19:28:30,425 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c8746695b3d38667decb7d27505c93be, disabling compactions & flushes 2024-11-16T19:28:30,425 DEBUG [RS:0;d11ab77873cb:36045 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:28:30,425 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be. 2024-11-16T19:28:30,425 INFO [RS:0;d11ab77873cb:36045 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T19:28:30,425 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be. 2024-11-16T19:28:30,425 INFO [RS:0;d11ab77873cb:36045 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T19:28:30,425 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be. after waiting 0 ms 2024-11-16T19:28:30,426 INFO [RS:0;d11ab77873cb:36045 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T19:28:30,426 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be. 2024-11-16T19:28:30,426 INFO [RS:0;d11ab77873cb:36045 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T19:28:30,426 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:30,426 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:30,426 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:30,426 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/2a48bf96e2b64359a9416be45c98eb23, hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/ed85c619ece7495d8e4b3d7d4d6cd859, hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/fb602438c6d848c7a3c6b00eb9f38e78, hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/65c43295823440dbba867bde6886a62b, hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/5adecd86e3854862b946f3bf93ed5b83, 
hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/fb5ef768a39742c4883ca8111dfbfe77] to archive 2024-11-16T19:28:30,426 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:30,426 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:30,427 INFO [RS:0;d11ab77873cb:36045 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T19:28:30,427 DEBUG [RS:0;d11ab77873cb:36045 {}] regionserver.HRegionServer(1325): Online Regions={c8746695b3d38667decb7d27505c93be=TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be., 1588230740=hbase:meta,,1.1588230740} 2024-11-16T19:28:30,427 DEBUG [RS:0;d11ab77873cb:36045 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, c8746695b3d38667decb7d27505c93be 2024-11-16T19:28:30,427 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T19:28:30,427 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T19:28:30,427 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:30,427 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T19:28:30,427 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T19:28:30,427 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T19:28:30,427 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T19:28:30,427 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 2024-11-16T19:28:30,427 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-16T19:28:30,428 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-16T19:28:30,428 WARN [IPC Server handler 3 on default port 41599 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 has not been closed. Lease recovery is in progress. RecoveryId = 1069 for block blk_1073741837_1013 2024-11-16T19:28:30,428 ERROR [FSHLog-0-hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4-prefix:d11ab77873cb,36045,1731785272299.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:30,428 WARN [FSHLog-0-hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4-prefix:d11ab77873cb,36045,1731785272299.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
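The "Failed to write trailer, non-fatal, continuing..." and "close old writer failed" entries above show the roller closing a WAL whose datanode pipeline is already dead: both the trailer write and the close throw, the errors are logged and swallowed, and the file is handed to lease recovery instead. A minimal sketch of that best-effort close pattern, with invented class and method names (this is not the AbstractFSWAL implementation):

// Sketch only: close an old WAL writer on a broken pipeline, treating failures as
// non-fatal and falling back to lease recovery, as the log entries above describe.
import java.io.Closeable;
import java.io.IOException;

public class BestEffortWalClose {

    interface WalWriter extends Closeable {
        void writeTrailer() throws IOException;
    }

    static void closeOldWriter(WalWriter oldWriter, Runnable recoverLease) {
        try {
            oldWriter.writeTrailer();      // may fail when all datanodes are bad
        } catch (IOException e) {
            System.out.println("Failed to write trailer, non-fatal, continuing... " + e);
        }
        try {
            oldWriter.close();             // may fail on the same broken pipeline
        } catch (IOException e) {
            System.out.println("close old writer failed: " + e);
            recoverLease.run();            // hand the file over to lease recovery
        }
    }

    public static void main(String[] args) {
        WalWriter broken = new WalWriter() {
            @Override public void writeTrailer() throws IOException {
                throw new IOException("All datanodes are bad. Aborting...");
            }
            @Override public void close() throws IOException {
                throw new IOException("All datanodes are bad. Aborting...");
            }
        };
        closeOldWriter(broken, () -> System.out.println("recover lease on the WAL file"));
    }
}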
2024-11-16T19:28:30,428 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 after 1ms 2024-11-16T19:28:30,428 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d11ab77873cb%2C36045%2C1731785272299.meta:.meta(num 1731785273336) roll requested 2024-11-16T19:28:30,428 INFO [regionserver/d11ab77873cb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C36045%2C1731785272299.meta.1731785310428.meta 2024-11-16T19:28:30,429 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/2a48bf96e2b64359a9416be45c98eb23 to hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/2a48bf96e2b64359a9416be45c98eb23 2024-11-16T19:28:30,431 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/ed85c619ece7495d8e4b3d7d4d6cd859 to hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/ed85c619ece7495d8e4b3d7d4d6cd859 2024-11-16T19:28:30,432 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/fb602438c6d848c7a3c6b00eb9f38e78 to hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/fb602438c6d848c7a3c6b00eb9f38e78 2024-11-16T19:28:30,433 WARN [Thread-1038 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1070 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32871 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T19:28:30,433 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:51274 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741885_1070] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data4]'}, localName='127.0.0.1:37617', datanodeUuid='b65ae952-8681-4d98-9cea-83182561fe12', xmitsInProgress=0}:Exception transferring block BP-1424824963-172.17.0.2-1731785271598:blk_1073741885_1070 to mirror 127.0.0.1:32871 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:30,433 WARN [Thread-1038 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741885_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37617,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK], DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK]) is bad. 2024-11-16T19:28:30,433 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/65c43295823440dbba867bde6886a62b to hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/65c43295823440dbba867bde6886a62b 2024-11-16T19:28:30,433 WARN [Thread-1038 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741885_1070 2024-11-16T19:28:30,433 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:51274 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741885_1070] {}] datanode.BlockReceiver(316): Block 1073741885 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
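The "Abandoning blk_1073741885_1070" entry just above, together with the "Excluding datanode ..." entry that follows, is the client-side half of pipeline-create recovery: when a block's write pipeline cannot be built because one target is unreachable, the block is abandoned, the bad datanode goes onto an exclude list, and a fresh block is requested without it. A hedged sketch of that loop, using stand-in types rather than the real HDFS client API:

// Sketch of the abandon/exclude/retry loop; Namenode and Block are stand-ins.
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class PipelineCreateRetrySketch {

    record Block(String id, List<String> pipeline) {}

    interface Namenode {
        Block addBlock(Set<String> excludedNodes);
        void abandonBlock(Block block);
    }

    /** Builds a write pipeline, excluding nodes that fail, up to maxRetries attempts. */
    static Block allocateWithRetry(Namenode nn, Set<String> badNodes, int maxRetries) {
        Set<String> excluded = new HashSet<>();
        for (int attempt = 0; attempt < maxRetries; attempt++) {
            Block b = nn.addBlock(excluded);
            String firstBad = b.pipeline().stream()
                .filter(badNodes::contains).findFirst().orElse(null);
            if (firstBad == null) {
                return b;                           // pipeline is healthy, use it
            }
            System.out.println("Abandoning " + b.id() + ", excluding datanode " + firstBad);
            nn.abandonBlock(b);                     // mirrors "Abandoning blk_..."
            excluded.add(firstBad);                 // mirrors "Excluding datanode ..."
        }
        throw new IllegalStateException("could not build a pipeline");
    }

    public static void main(String[] args) {
        Set<String> dead = Set.of("127.0.0.1:32871");
        Namenode fake = new Namenode() {
            int n = 0;
            @Override public Block addBlock(Set<String> excludedNodes) {
                List<String> nodes = new ArrayList<>(List.of("127.0.0.1:37617", "127.0.0.1:33499"));
                if (!excludedNodes.contains("127.0.0.1:32871")) nodes.add(1, "127.0.0.1:32871");
                return new Block("blk_" + (++n), nodes);
            }
            @Override public void abandonBlock(Block block) { /* no-op in the sketch */ }
        };
        System.out.println("Got pipeline " + allocateWithRetry(fake, dead, 3).pipeline());
    }
}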
2024-11-16T19:28:30,434 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:51274 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741885_1070] {}] datanode.DataXceiver(331): 127.0.0.1:37617:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51274 dst: /127.0.0.1:37617 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:30,434 WARN [Thread-1038 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK] 2024-11-16T19:28:30,435 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/5adecd86e3854862b946f3bf93ed5b83 to hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/5adecd86e3854862b946f3bf93ed5b83 2024-11-16T19:28:30,436 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/fb5ef768a39742c4883ca8111dfbfe77 to hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/info/fb5ef768a39742c4883ca8111dfbfe77 2024-11-16T19:28:30,437 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=d11ab77873cb:42783 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] 
at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-16T19:28:30,437 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [2a48bf96e2b64359a9416be45c98eb23=10347, ed85c619ece7495d8e4b3d7d4d6cd859=12506, fb602438c6d848c7a3c6b00eb9f38e78=17994, 65c43295823440dbba867bde6886a62b=6027, 5adecd86e3854862b946f3bf93ed5b83=6027, fb5ef768a39742c4883ca8111dfbfe77=11421] 2024-11-16T19:28:30,438 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:30,438 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:30,438 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:30,438 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:30,438 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:30,439 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785310428.meta 2024-11-16T19:28:30,439 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:30,439 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46515,DS-e9aadc50-b1dd-4762-a471-cfe1a1f930e9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T19:28:30,439 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta 2024-11-16T19:28:30,439 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40045:40045),(127.0.0.1/127.0.0.1:44743:44743)] 2024-11-16T19:28:30,439 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta is not closed yet, will try archiving it next time 2024-11-16T19:28:30,440 WARN [IPC Server handler 0 on default port 41599 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta has not been closed. Lease recovery is in progress. RecoveryId = 1072 for block blk_1073741834_1010 2024-11-16T19:28:30,440 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta after 1ms 2024-11-16T19:28:30,441 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/c8746695b3d38667decb7d27505c93be/recovered.edits/69.seqid, newMaxSeqId=69, maxSeqId=1 2024-11-16T19:28:30,442 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be. 2024-11-16T19:28:30,442 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c8746695b3d38667decb7d27505c93be: Waiting for close lock at 1731785310425Running coprocessor pre-close hooks at 1731785310425Disabling compacts and flushes for region at 1731785310425Disabling writes for close at 1731785310425Writing region close event to WAL at 1731785310438 (+13 ms)Running coprocessor post-close hooks at 1731785310442 (+4 ms)Closed at 1731785310442 2024-11-16T19:28:30,442 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be. 
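The "Recover lease on dfs file ..." and "Failed to recover lease, attempt=0 ... after 1ms" entries above (with attempt=1 appearing roughly four seconds later, further down this log) reflect a simple poll-and-retry pattern: ask the namenode to recover the lease, and if the file is not yet closed, wait and try again. An illustrative sketch of that pattern only, with stand-in interfaces rather than the RecoverLeaseFSUtils API:

// Sketch of lease-recovery retry; the 4-second pause matches the gap between
// attempt=0 and attempt=1 in this log, not a documented constant.
import java.util.function.BooleanSupplier;

public class RecoverLeaseRetrySketch {

    /** Returns true once the lease is recovered, retrying with a fixed pause. */
    static boolean recoverLease(BooleanSupplier recoverCall, String file,
                                int maxAttempts, long pauseMs) throws InterruptedException {
        long start = System.currentTimeMillis();
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            if (recoverCall.getAsBoolean()) {
                return true;                          // namenode closed the file
            }
            long elapsed = System.currentTimeMillis() - start;
            System.out.println("Failed to recover lease, attempt=" + attempt
                + " on file=" + file + " after " + elapsed + "ms");
            Thread.sleep(pauseMs);
        }
        return false;
    }

    public static void main(String[] args) throws InterruptedException {
        // Simulate a lease that only frees up on the third call.
        int[] calls = {0};
        BooleanSupplier fakeRecover = () -> ++calls[0] >= 3;
        boolean ok = recoverLease(fakeRecover, "hdfs://localhost:41599/example.wal", 5, 4000L);
        System.out.println("lease recovered: " + ok);
    }
}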
2024-11-16T19:28:30,454 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/hbase/meta/1588230740/.tmp/info/a8230f62bf86427cafcd819ec7bd1cc9 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731785273705.c8746695b3d38667decb7d27505c93be./info:regioninfo/1731785274086/Put/seqid=0 2024-11-16T19:28:30,455 WARN [Thread-1046 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:30,455 WARN [Thread-1046 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741887_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK], DatanodeInfoWithStorage[127.0.0.1:37617,DS-e9ee80de-25e2-4782-abd9-24b5fa243d58,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK]) is bad. 
2024-11-16T19:28:30,456 WARN [Thread-1046 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741887_1073 2024-11-16T19:28:30,456 WARN [Thread-1046 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK] 2024-11-16T19:28:30,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741888_1074 (size=7089) 2024-11-16T19:28:30,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37617 is added to blk_1073741888_1074 (size=7089) 2024-11-16T19:28:30,461 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/hbase/meta/1588230740/.tmp/info/a8230f62bf86427cafcd819ec7bd1cc9 2024-11-16T19:28:30,479 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/hbase/meta/1588230740/.tmp/ns/931a094ee26646b09b494bc669d58a7e is 43, key is default/ns:d/1731785273427/Put/seqid=0 2024-11-16T19:28:30,482 WARN [Thread-1052 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32871 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:30,482 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:47974 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741889_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data10]'}, localName='127.0.0.1:33499', datanodeUuid='610fb1e1-0ee3-41c4-8b6b-5fcbf217455a', xmitsInProgress=0}:Exception transferring block BP-1424824963-172.17.0.2-1731785271598:blk_1073741889_1075 to mirror 127.0.0.1:32871 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:30,482 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:47974 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741889_1075] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T19:28:30,482 WARN [Thread-1052 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1424824963-172.17.0.2-1731785271598:blk_1073741889_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33499,DS-12a5ff31-91b9-4c6a-9760-f2cc12c0df6d,DISK], DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK]) is bad. 2024-11-16T19:28:30,482 WARN [Thread-1052 {}] hdfs.DataStreamer(1850): Abandoning BP-1424824963-172.17.0.2-1731785271598:blk_1073741889_1075 2024-11-16T19:28:30,482 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1174396979_22 at /127.0.0.1:47974 [Receiving block BP-1424824963-172.17.0.2-1731785271598:blk_1073741889_1075] {}] datanode.DataXceiver(331): 127.0.0.1:33499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47974 dst: /127.0.0.1:33499 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T19:28:30,483 WARN [Thread-1052 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32871,DS-aa473454-493d-4d15-b8df-a374168d11df,DISK] 2024-11-16T19:28:30,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37617 is added to blk_1073741890_1076 (size=5153) 2024-11-16T19:28:30,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741890_1076 (size=5153) 2024-11-16T19:28:30,488 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/hbase/meta/1588230740/.tmp/ns/931a094ee26646b09b494bc669d58a7e 2024-11-16T19:28:30,507 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/hbase/meta/1588230740/.tmp/table/817467b9166148f8a7d793f3ac0625e9 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731785274096/Put/seqid=0 2024-11-16T19:28:30,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741891_1077 (size=5424) 2024-11-16T19:28:30,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37617 is added to blk_1073741891_1077 (size=5424) 2024-11-16T19:28:30,513 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/hbase/meta/1588230740/.tmp/table/817467b9166148f8a7d793f3ac0625e9 2024-11-16T19:28:30,518 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/hbase/meta/1588230740/.tmp/info/a8230f62bf86427cafcd819ec7bd1cc9 as hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/hbase/meta/1588230740/info/a8230f62bf86427cafcd819ec7bd1cc9 2024-11-16T19:28:30,525 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/hbase/meta/1588230740/info/a8230f62bf86427cafcd819ec7bd1cc9, entries=10, sequenceid=11, filesize=6.9 K 2024-11-16T19:28:30,527 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/hbase/meta/1588230740/.tmp/ns/931a094ee26646b09b494bc669d58a7e as hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/hbase/meta/1588230740/ns/931a094ee26646b09b494bc669d58a7e 2024-11-16T19:28:30,534 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/hbase/meta/1588230740/ns/931a094ee26646b09b494bc669d58a7e, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T19:28:30,535 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/hbase/meta/1588230740/.tmp/table/817467b9166148f8a7d793f3ac0625e9 as hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/hbase/meta/1588230740/table/817467b9166148f8a7d793f3ac0625e9 2024-11-16T19:28:30,540 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/hbase/meta/1588230740/table/817467b9166148f8a7d793f3ac0625e9, entries=2, sequenceid=11, filesize=5.3 K 2024-11-16T19:28:30,542 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 114ms, sequenceid=11, compaction requested=false 2024-11-16T19:28:30,546 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T19:28:30,547 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T19:28:30,547 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T19:28:30,547 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731785310427Running coprocessor pre-close hooks at 1731785310427Disabling compacts and flushes for region at 1731785310427Disabling writes for close at 1731785310427Obtaining lock to block concurrent updates at 1731785310428 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731785310428Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731785310428Flushing stores of hbase:meta,,1.1588230740 at 1731785310440 (+12 ms)Flushing 1588230740/info: creating writer at 1731785310440Flushing 1588230740/info: appending metadata at 1731785310453 (+13 ms)Flushing 1588230740/info: closing flushed file at 1731785310453Flushing 1588230740/ns: creating writer at 1731785310466 (+13 ms)Flushing 1588230740/ns: appending metadata at 1731785310479 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1731785310479Flushing 1588230740/table: creating writer at 1731785310494 (+15 ms)Flushing 1588230740/table: appending metadata at 1731785310507 (+13 ms)Flushing 1588230740/table: closing flushed file at 1731785310507Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@cd54777: reopening flushed file at 1731785310517 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@477de0d2: reopening flushed file at 1731785310526 (+9 
ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ab7268f: reopening flushed file at 1731785310534 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 114ms, sequenceid=11, compaction requested=false at 1731785310542 (+8 ms)Writing region close event to WAL at 1731785310543 (+1 ms)Running coprocessor post-close hooks at 1731785310547 (+4 ms)Closed at 1731785310547 2024-11-16T19:28:30,547 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T19:28:30,627 INFO [RS:0;d11ab77873cb:36045 {}] regionserver.HRegionServer(976): stopping server d11ab77873cb,36045,1731785272299; all regions closed. 2024-11-16T19:28:30,628 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:30,628 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:30,628 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:30,628 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:30,628 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:30,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741886_1071 (size=825) 2024-11-16T19:28:30,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37617 is added to blk_1073741886_1071 (size=825) 2024-11-16T19:28:30,673 INFO [regionserver/d11ab77873cb:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T19:28:30,695 INFO [regionserver/d11ab77873cb:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T19:28:30,695 INFO [regionserver/d11ab77873cb:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T19:28:30,751 INFO [regionserver/d11ab77873cb:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T19:28:30,751 INFO [regionserver/d11ab77873cb:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T19:28:31,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37617 is added to blk_1073741861_1044 (size=13591) 2024-11-16T19:28:31,638 INFO [regionserver/d11ab77873cb:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T19:28:31,648 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@74073c2f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37617, datanodeUuid=b65ae952-8681-4d98-9cea-83182561fe12, infoPort=40045, infoSecurePort=0, ipcPort=42173, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598):Failed to transfer BP-1424824963-172.17.0.2-1731785271598:blk_1073741828_1004 to 127.0.0.1:32871 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:31,648 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@20dbd28a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37617, datanodeUuid=b65ae952-8681-4d98-9cea-83182561fe12, infoPort=40045, infoSecurePort=0, ipcPort=42173, storageInfo=lv=-57;cid=testClusterID;nsid=714393905;c=1731785271598):Failed to transfer BP-1424824963-172.17.0.2-1731785271598:blk_1073741836_1012 to 127.0.0.1:32871 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:32,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741826_1002 (size=42) 2024-11-16T19:28:33,467 INFO [master/d11ab77873cb:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T19:28:33,467 INFO [master/d11ab77873cb:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
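The "Failed to recover lease, attempt=1 ... after 4003ms" entries that follow (and the Close-WAL-Writer-0 stack later in the thread dump, which sits in RecoverLeaseFSUtils.recoverDFSFileLease) are HBase retrying lease recovery on the old WAL files after the writer died. A bare-bones sketch of that retry pattern against the public HDFS API is below; the attempt cap and the 4-second pause are illustrative assumptions, not the values RecoverLeaseFSUtils actually uses.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Illustrative only: a minimal version of the "Failed to recover lease, attempt=N"
    // loop seen in the log. HBase's RecoverLeaseFSUtils adds backoff, timeouts and
    // safety checks that are omitted here.
    public final class LeaseRecoverySketch {
      public static boolean recoverLease(Configuration conf, Path walFile) throws Exception {
        FileSystem fs = walFile.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
          return true; // non-HDFS filesystems have no lease to recover
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        for (int attempt = 1; attempt <= 10; attempt++) {
          // recoverLease() returns true once the NameNode has closed the file
          // on behalf of the dead writer; until then, wait and retry.
          if (dfs.recoverLease(walFile)) {
            return true;
          }
          Thread.sleep(4000L); // assumed pause between attempts
        }
        return false;
      }
    }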
2024-11-16T19:28:34,430 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 after 4003ms 2024-11-16T19:28:34,441 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta after 4002ms 2024-11-16T19:28:34,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741825_1001 (size=7) 2024-11-16T19:28:34,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741827_1003 (size=196) 2024-11-16T19:28:34,669 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@583eae27 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1424824963-172.17.0.2-1731785271598:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:46515,null,null]) java.net.ConnectException: Call From d11ab77873cb/172.17.0.2 to localhost:33759 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-16T19:28:35,428 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-16T19:28:35,434 DEBUG [RS:1;d11ab77873cb:45785 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/oldWALs 2024-11-16T19:28:35,434 INFO [RS:1;d11ab77873cb:45785 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d11ab77873cb%2C45785%2C1731785273562:(num 1731785273801) 2024-11-16T19:28:35,434 DEBUG [RS:1;d11ab77873cb:45785 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:28:35,434 INFO [RS:1;d11ab77873cb:45785 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T19:28:35,434 INFO [RS:1;d11ab77873cb:45785 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T19:28:35,435 INFO [RS:1;d11ab77873cb:45785 {}] hbase.ChoreService(370): Chore service for: regionserver/d11ab77873cb:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-16T19:28:35,436 INFO [RS:1;d11ab77873cb:45785 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T19:28:35,436 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T19:28:35,436 INFO [RS:1;d11ab77873cb:45785 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T19:28:35,436 INFO [RS:1;d11ab77873cb:45785 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
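The WAL-Shutdown-0 ERROR above names the knob it wants checked: "hbase.wal.fshlog.wait.on.shutdown.seconds". A hedged sketch of raising that wait programmatically follows; the same key can equally be set in hbase-site.xml, and the 30-second value is an arbitrary example rather than a recommendation.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Illustrative only: bump the WAL shutdown wait that the ERROR above refers to.
    public final class WalShutdownWaitSketch {
      public static Configuration withLongerWalShutdownWait() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30); // example value
        return conf;
      }
    }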
2024-11-16T19:28:35,436 INFO [RS:1;d11ab77873cb:45785 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T19:28:35,436 INFO [RS:1;d11ab77873cb:45785 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45785 2024-11-16T19:28:35,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T19:28:35,438 INFO [RS:1;d11ab77873cb:45785 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T19:28:35,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45785-0x1004a0188070002, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d11ab77873cb,45785,1731785273562 2024-11-16T19:28:35,439 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d11ab77873cb,45785,1731785273562] 2024-11-16T19:28:35,440 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d11ab77873cb,45785,1731785273562 already deleted, retry=false 2024-11-16T19:28:35,440 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d11ab77873cb,45785,1731785273562 expired; onlineServers=1 2024-11-16T19:28:35,443 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:35,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:35,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:35,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:35,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:35,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:35,472 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:35,472 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:35,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45785-0x1004a0188070002, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:28:35,540 INFO [RS:1;d11ab77873cb:45785 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T19:28:35,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45785-0x1004a0188070002, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:28:35,540 INFO [RS:1;d11ab77873cb:45785 {}] regionserver.HRegionServer(1031): Exiting; stopping=d11ab77873cb,45785,1731785273562; zookeeper connection closed. 2024-11-16T19:28:35,540 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1f830fe1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1f830fe1 2024-11-16T19:28:35,629 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-16T19:28:35,637 DEBUG [RS:0;d11ab77873cb:36045 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/oldWALs 2024-11-16T19:28:35,637 INFO [RS:0;d11ab77873cb:36045 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d11ab77873cb%2C36045%2C1731785272299.meta:.meta(num 1731785310428) 2024-11-16T19:28:35,638 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:35,638 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:35,638 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:35,639 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:35,639 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:35,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741880_1064 (size=16308) 2024-11-16T19:28:35,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37617 is added to blk_1073741880_1064 (size=16308) 2024-11-16T19:28:35,645 DEBUG [RS:0;d11ab77873cb:36045 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/oldWALs 2024-11-16T19:28:35,645 INFO [RS:0;d11ab77873cb:36045 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d11ab77873cb%2C36045%2C1731785272299:(num 1731785309964) 2024-11-16T19:28:35,645 DEBUG [RS:0;d11ab77873cb:36045 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:28:35,645 INFO [RS:0;d11ab77873cb:36045 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T19:28:35,645 INFO [RS:0;d11ab77873cb:36045 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T19:28:35,645 INFO [RS:0;d11ab77873cb:36045 {}] hbase.ChoreService(370): Chore service for: regionserver/d11ab77873cb:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T19:28:35,646 INFO [RS:0;d11ab77873cb:36045 {}] 
hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T19:28:35,646 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T19:28:35,646 INFO [RS:0;d11ab77873cb:36045 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36045 2024-11-16T19:28:35,647 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36045-0x1004a0188070001, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d11ab77873cb,36045,1731785272299 2024-11-16T19:28:35,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T19:28:35,647 INFO [RS:0;d11ab77873cb:36045 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T19:28:35,648 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d11ab77873cb,36045,1731785272299] 2024-11-16T19:28:35,649 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d11ab77873cb,36045,1731785272299 already deleted, retry=false 2024-11-16T19:28:35,649 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d11ab77873cb,36045,1731785272299 expired; onlineServers=0 2024-11-16T19:28:35,649 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd11ab77873cb,42783,1731785272235' ***** 2024-11-16T19:28:35,649 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T19:28:35,649 INFO [M:0;d11ab77873cb:42783 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T19:28:35,649 INFO [M:0;d11ab77873cb:42783 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T19:28:35,649 DEBUG [M:0;d11ab77873cb:42783 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T19:28:35,649 DEBUG [M:0;d11ab77873cb:42783 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T19:28:35,649 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T19:28:35,649 DEBUG [master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.large.0-1731785272546 {}] cleaner.HFileCleaner(306): Exit Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.large.0-1731785272546,5,FailOnTimeoutGroup] 2024-11-16T19:28:35,649 DEBUG [master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.small.0-1731785272548 {}] cleaner.HFileCleaner(306): Exit Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.small.0-1731785272548,5,FailOnTimeoutGroup] 2024-11-16T19:28:35,649 INFO [M:0;d11ab77873cb:42783 {}] hbase.ChoreService(370): Chore service for: master/d11ab77873cb:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T19:28:35,649 INFO [M:0;d11ab77873cb:42783 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T19:28:35,649 DEBUG [M:0;d11ab77873cb:42783 {}] master.HMaster(1795): Stopping service threads 2024-11-16T19:28:35,649 INFO [M:0;d11ab77873cb:42783 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T19:28:35,650 INFO [M:0;d11ab77873cb:42783 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T19:28:35,650 INFO [M:0;d11ab77873cb:42783 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T19:28:35,650 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T19:28:35,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T19:28:35,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:28:35,650 DEBUG [M:0;d11ab77873cb:42783 {}] zookeeper.ZKUtil(347): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T19:28:35,650 WARN [M:0;d11ab77873cb:42783 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T19:28:35,651 INFO [M:0;d11ab77873cb:42783 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/.lastflushedseqids 2024-11-16T19:28:35,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37617 is added to blk_1073741892_1078 (size=130) 2024-11-16T19:28:35,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741892_1078 (size=130) 2024-11-16T19:28:35,663 INFO [M:0;d11ab77873cb:42783 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T19:28:35,663 INFO [M:0;d11ab77873cb:42783 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T19:28:35,663 DEBUG [M:0;d11ab77873cb:42783 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T19:28:35,663 INFO [M:0;d11ab77873cb:42783 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:28:35,663 DEBUG [M:0;d11ab77873cb:42783 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:28:35,663 DEBUG [M:0;d11ab77873cb:42783 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T19:28:35,663 DEBUG [M:0;d11ab77873cb:42783 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:28:35,664 INFO [M:0;d11ab77873cb:42783 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-16T19:28:35,679 DEBUG [M:0;d11ab77873cb:42783 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2d3f90b5e7364b3abfdf208af158e026 is 82, key is hbase:meta,,1/info:regioninfo/1731785273400/Put/seqid=0 2024-11-16T19:28:35,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37617 is added to blk_1073741893_1079 (size=5672) 2024-11-16T19:28:35,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741893_1079 (size=5672) 2024-11-16T19:28:35,684 INFO [M:0;d11ab77873cb:42783 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2d3f90b5e7364b3abfdf208af158e026 2024-11-16T19:28:35,704 DEBUG [M:0;d11ab77873cb:42783 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/133181c457104b96938ffe24ebe4da55 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731785274102/Put/seqid=0 2024-11-16T19:28:35,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741894_1080 (size=6255) 2024-11-16T19:28:35,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37617 is added to blk_1073741894_1080 (size=6255) 2024-11-16T19:28:35,710 INFO [M:0;d11ab77873cb:42783 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/133181c457104b96938ffe24ebe4da55 2024-11-16T19:28:35,715 INFO [M:0;d11ab77873cb:42783 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 133181c457104b96938ffe24ebe4da55 2024-11-16T19:28:35,728 DEBUG [M:0;d11ab77873cb:42783 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/97680b860ad1430ab738c0a5bfe3b2be is 69, key is d11ab77873cb,36045,1731785272299/rs:state/1731785272650/Put/seqid=0 2024-11-16T19:28:35,733 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37617 is added to blk_1073741895_1081 (size=5224) 2024-11-16T19:28:35,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741895_1081 (size=5224) 2024-11-16T19:28:35,734 INFO [M:0;d11ab77873cb:42783 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/97680b860ad1430ab738c0a5bfe3b2be 2024-11-16T19:28:35,748 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36045-0x1004a0188070001, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:28:35,748 INFO [RS:0;d11ab77873cb:36045 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T19:28:35,748 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36045-0x1004a0188070001, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:28:35,748 INFO [RS:0;d11ab77873cb:36045 {}] regionserver.HRegionServer(1031): Exiting; stopping=d11ab77873cb,36045,1731785272299; zookeeper connection closed. 2024-11-16T19:28:35,749 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5e0adc88 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5e0adc88 2024-11-16T19:28:35,749 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-16T19:28:35,753 DEBUG [M:0;d11ab77873cb:42783 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f0463911114c4c4bb749a14c70d87cd7 is 52, key is load_balancer_on/state:d/1731785273539/Put/seqid=0 2024-11-16T19:28:35,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37617 is added to blk_1073741896_1082 (size=5056) 2024-11-16T19:28:35,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741896_1082 (size=5056) 2024-11-16T19:28:35,758 INFO [M:0;d11ab77873cb:42783 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f0463911114c4c4bb749a14c70d87cd7 2024-11-16T19:28:35,764 DEBUG [M:0;d11ab77873cb:42783 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2d3f90b5e7364b3abfdf208af158e026 as hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2d3f90b5e7364b3abfdf208af158e026 2024-11-16T19:28:35,769 INFO [M:0;d11ab77873cb:42783 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2d3f90b5e7364b3abfdf208af158e026, entries=8, sequenceid=60, filesize=5.5 K 2024-11-16T19:28:35,770 DEBUG [M:0;d11ab77873cb:42783 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/133181c457104b96938ffe24ebe4da55 as hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/133181c457104b96938ffe24ebe4da55 2024-11-16T19:28:35,776 INFO [M:0;d11ab77873cb:42783 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 133181c457104b96938ffe24ebe4da55 2024-11-16T19:28:35,776 INFO [M:0;d11ab77873cb:42783 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/133181c457104b96938ffe24ebe4da55, entries=6, sequenceid=60, filesize=6.1 K 2024-11-16T19:28:35,777 DEBUG [M:0;d11ab77873cb:42783 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/97680b860ad1430ab738c0a5bfe3b2be as hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/97680b860ad1430ab738c0a5bfe3b2be 2024-11-16T19:28:35,783 INFO [M:0;d11ab77873cb:42783 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/97680b860ad1430ab738c0a5bfe3b2be, entries=2, sequenceid=60, filesize=5.1 K 2024-11-16T19:28:35,784 DEBUG [M:0;d11ab77873cb:42783 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f0463911114c4c4bb749a14c70d87cd7 as hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f0463911114c4c4bb749a14c70d87cd7 2024-11-16T19:28:35,791 INFO [M:0;d11ab77873cb:42783 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f0463911114c4c4bb749a14c70d87cd7, entries=1, sequenceid=60, filesize=4.9 K 2024-11-16T19:28:35,792 INFO [M:0;d11ab77873cb:42783 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=60, compaction requested=false 2024-11-16T19:28:35,794 INFO [M:0;d11ab77873cb:42783 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
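The HRegionFileSystem DEBUG lines above ("Committing .../.tmp/... as .../info/...", etc.) show the flush commit step: each flushed HFile is first written under the region's .tmp directory and then moved into its column-family directory. A minimal sketch of that move using the public Hadoop FileSystem API, with placeholder paths and none of HBase's own validation or rename helpers:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative only: the ".tmp -> store directory" commit that the
    // HRegionFileSystem DEBUG lines above describe.
    public final class FlushCommitSketch {
      public static Path commitFlushedFile(Configuration conf, Path tmpHFile, Path storeDir)
          throws IOException {
        FileSystem fs = tmpHFile.getFileSystem(conf);
        Path dest = new Path(storeDir, tmpHFile.getName()); // keep the generated file name
        if (!fs.rename(tmpHFile, dest)) {                    // the rename is the "commit"
          throw new IOException("Failed to commit " + tmpHFile + " as " + dest);
        }
        return dest;
      }
    }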
2024-11-16T19:28:35,794 DEBUG [M:0;d11ab77873cb:42783 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731785315663Disabling compacts and flushes for region at 1731785315663Disabling writes for close at 1731785315663Obtaining lock to block concurrent updates at 1731785315664 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731785315664Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1731785315664Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731785315665 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731785315665Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731785315678 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731785315678Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731785315690 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731785315703 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731785315703Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731785315715 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731785315728 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731785315728Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731785315739 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731785315752 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731785315752Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48d0643e: reopening flushed file at 1731785315763 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a4bdc8e: reopening flushed file at 1731785315769 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19aadc77: reopening flushed file at 1731785315776 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@154e1817: reopening flushed file at 1731785315783 (+7 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=60, compaction requested=false at 1731785315792 (+9 ms)Writing region close event to WAL at 1731785315793 (+1 ms)Closed at 1731785315793 2024-11-16T19:28:35,794 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:35,794 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:35,794 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:35,794 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:35,795 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:35,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37617 is added to blk_1073741879_1062 (size=1045) 2024-11-16T19:28:35,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33499 is added to blk_1073741879_1062 (size=1045) 2024-11-16T19:28:35,797 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T19:28:35,797 INFO [M:0;d11ab77873cb:42783 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T19:28:35,798 INFO [M:0;d11ab77873cb:42783 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42783 2024-11-16T19:28:35,798 INFO [M:0;d11ab77873cb:42783 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T19:28:35,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:28:35,899 INFO [M:0;d11ab77873cb:42783 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T19:28:35,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42783-0x1004a0188070000, quorum=127.0.0.1:59815, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:28:35,901 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@633469fc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:28:35,902 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@28f4e296{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:28:35,902 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:28:35,902 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7d04364e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:28:35,902 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66e63d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/hadoop.log.dir/,STOPPED} 2024-11-16T19:28:35,903 WARN [BP-1424824963-172.17.0.2-1731785271598 heartbeating to localhost/127.0.0.1:41599 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T19:28:35,903 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2527e177 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1424824963-172.17.0.2-1731785271598:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:46515,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:33759 , LocalHost:localPort d11ab77873cb/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-16T19:28:35,903 WARN [BP-1424824963-172.17.0.2-1731785271598 heartbeating to localhost/127.0.0.1:41599 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1424824963-172.17.0.2-1731785271598 (Datanode Uuid b65ae952-8681-4d98-9cea-83182561fe12) service to localhost/127.0.0.1:41599 2024-11-16T19:28:35,903 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T19:28:35,903 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T19:28:35,904 ERROR [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2527e177 {}] datanode.DataNode(1743): Cannot find BPOfferService for reporting block received for bpid=BP-1424824963-172.17.0.2-1731785271598 2024-11-16T19:28:35,904 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data3/current/BP-1424824963-172.17.0.2-1731785271598 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:28:35,904 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data4/current/BP-1424824963-172.17.0.2-1731785271598 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:28:35,904 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2527e177 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1424824963-172.17.0.2-1731785271598:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:46515,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1424824963-172.17.0.2-1731785271598 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:35,905 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2527e177 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1424824963-172.17.0.2-1731785271598:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:37617,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1424824963-172.17.0.2-1731785271598 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:35,905 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T19:28:35,905 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2527e177 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1424824963-172.17.0.2-1731785271598:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:46515,null,null], DatanodeInfoWithStorage[127.0.0.1:37617,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1424824963-172.17.0.2-1731785271598:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:46515,null,null], DatanodeInfoWithStorage[127.0.0.1:37617,null,null]] 2024-11-16T19:28:35,907 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c81b75d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:28:35,907 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7f55aa3b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:28:35,907 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:28:35,908 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50ff2063{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:28:35,908 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3dc0bdb3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/hadoop.log.dir/,STOPPED} 2024-11-16T19:28:35,909 WARN [BP-1424824963-172.17.0.2-1731785271598 heartbeating to localhost/127.0.0.1:41599 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T19:28:35,909 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T19:28:35,909 WARN [BP-1424824963-172.17.0.2-1731785271598 heartbeating to localhost/127.0.0.1:41599 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1424824963-172.17.0.2-1731785271598 (Datanode Uuid 610fb1e1-0ee3-41c4-8b6b-5fcbf217455a) service to localhost/127.0.0.1:41599 2024-11-16T19:28:35,909 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T19:28:35,909 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data9/current/BP-1424824963-172.17.0.2-1731785271598 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:28:35,910 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/cluster_9e7253c2-e958-3665-264d-c13ed56749c8/data/data10/current/BP-1424824963-172.17.0.2-1731785271598 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:28:35,910 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T19:28:35,915 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7cf515b1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T19:28:35,916 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@c6abea1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:28:35,916 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:28:35,916 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e7025d4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:28:35,916 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16369da1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/hadoop.log.dir/,STOPPED} 2024-11-16T19:28:35,923 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T19:28:35,955 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T19:28:35,964 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=155 (was 80) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41599 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41599 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007fbe04bf4db8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007fbe04bf4db8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41599 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-1 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:41599 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:36261 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36261 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41599 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41599 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41599 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:41599 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41599 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41599 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41599 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=452 (was 405) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=140 (was 161), ProcessCount=11 (was 11), AvailableMemoryMB=2402 (was 2825) 2024-11-16T19:28:35,973 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=155, OpenFileDescriptor=452, MaxFileDescriptor=1048576, SystemLoadAverage=140, ProcessCount=11, AvailableMemoryMB=2401 2024-11-16T19:28:35,973 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T19:28:35,973 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/hadoop.log.dir so I do NOT create it in target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34 2024-11-16T19:28:35,973 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f7bec38-55e5-04cf-5954-66d8803bb9d3/hadoop.tmp.dir so I do NOT create it in target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34 2024-11-16T19:28:35,973 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/cluster_032fc1cc-3d9e-2961-e04e-0405007a6b3d, deleteOnExit=true 2024-11-16T19:28:35,973 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T19:28:35,974 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/test.cache.data in system properties and HBase conf 2024-11-16T19:28:35,974 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T19:28:35,974 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/hadoop.log.dir in system properties and HBase conf 2024-11-16T19:28:35,974 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T19:28:35,974 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T19:28:35,974 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T19:28:35,974 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-16T19:28:35,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T19:28:35,975 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T19:28:35,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T19:28:35,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T19:28:35,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T19:28:35,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T19:28:35,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T19:28:35,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T19:28:35,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T19:28:35,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T19:28:35,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/nfs.dump.dir in system properties and HBase conf 2024-11-16T19:28:35,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/java.io.tmpdir in system properties and HBase conf 2024-11-16T19:28:35,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T19:28:35,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T19:28:35,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T19:28:35,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:35,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:35,993 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:35,993 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:35,993 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:35,993 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T19:28:35,994 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:35,999 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:35,999 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:35,999 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:36,002 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:36,057 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:28:36,061 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:28:36,062 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:28:36,062 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:28:36,062 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T19:28:36,063 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:28:36,063 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7bd03e52{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:28:36,064 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27a49013{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:28:36,155 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1915705e{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/java.io.tmpdir/jetty-localhost-40041-hadoop-hdfs-3_4_1-tests_jar-_-any-13914413960937495661/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T19:28:36,156 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@17b2a9ba{HTTP/1.1, (http/1.1)}{localhost:40041} 2024-11-16T19:28:36,156 INFO [Time-limited test {}] server.Server(415): Started @142810ms 2024-11-16T19:28:36,167 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T19:28:36,210 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:28:36,213 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:28:36,216 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:28:36,216 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:28:36,216 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T19:28:36,217 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25f63c50{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:28:36,218 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@165d0fad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:28:36,311 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6f0827f7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/java.io.tmpdir/jetty-localhost-34075-hadoop-hdfs-3_4_1-tests_jar-_-any-2333052634159165870/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:28:36,311 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2744dc92{HTTP/1.1, (http/1.1)}{localhost:34075} 2024-11-16T19:28:36,311 INFO [Time-limited test {}] server.Server(415): Started @142965ms 2024-11-16T19:28:36,312 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T19:28:36,337 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:28:36,341 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:28:36,342 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:28:36,342 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:28:36,342 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T19:28:36,344 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11255fea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:28:36,344 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3fd7563{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:28:36,371 WARN [Thread-1179 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/cluster_032fc1cc-3d9e-2961-e04e-0405007a6b3d/data/data1/current/BP-856716490-172.17.0.2-1731785316016/current, will proceed with Du for space computation calculation, 2024-11-16T19:28:36,371 WARN [Thread-1180 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/cluster_032fc1cc-3d9e-2961-e04e-0405007a6b3d/data/data2/current/BP-856716490-172.17.0.2-1731785316016/current, will proceed with Du for space computation calculation, 2024-11-16T19:28:36,385 WARN [Thread-1158 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T19:28:36,388 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8fb59d54390ecaf2 with lease ID 0x495697860a96bfc2: Processing first storage report for DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512 from datanode DatanodeRegistration(127.0.0.1:37075, datanodeUuid=1fc598ff-773e-473c-adca-22e7b3ecc203, infoPort=46161, infoSecurePort=0, ipcPort=44523, storageInfo=lv=-57;cid=testClusterID;nsid=1143451536;c=1731785316016) 2024-11-16T19:28:36,388 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8fb59d54390ecaf2 with lease ID 0x495697860a96bfc2: from storage DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512 node DatanodeRegistration(127.0.0.1:37075, datanodeUuid=1fc598ff-773e-473c-adca-22e7b3ecc203, infoPort=46161, infoSecurePort=0, ipcPort=44523, storageInfo=lv=-57;cid=testClusterID;nsid=1143451536;c=1731785316016), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T19:28:36,388 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8fb59d54390ecaf2 with lease ID 0x495697860a96bfc2: Processing first storage report for DS-20643be2-c0fe-4a7c-a141-edfa643c74c3 from datanode DatanodeRegistration(127.0.0.1:37075, datanodeUuid=1fc598ff-773e-473c-adca-22e7b3ecc203, infoPort=46161, infoSecurePort=0, ipcPort=44523, storageInfo=lv=-57;cid=testClusterID;nsid=1143451536;c=1731785316016) 2024-11-16T19:28:36,388 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8fb59d54390ecaf2 with lease ID 0x495697860a96bfc2: from storage DS-20643be2-c0fe-4a7c-a141-edfa643c74c3 node DatanodeRegistration(127.0.0.1:37075, datanodeUuid=1fc598ff-773e-473c-adca-22e7b3ecc203, infoPort=46161, infoSecurePort=0, ipcPort=44523, storageInfo=lv=-57;cid=testClusterID;nsid=1143451536;c=1731785316016), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:28:36,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:36,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:28:36,444 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@140caf6f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/java.io.tmpdir/jetty-localhost-38685-hadoop-hdfs-3_4_1-tests_jar-_-any-14454180592881492472/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:28:36,445 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1da660ce{HTTP/1.1, (http/1.1)}{localhost:38685} 2024-11-16T19:28:36,445 INFO [Time-limited test {}] server.Server(415): Started @143099ms 2024-11-16T19:28:36,446 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T19:28:36,509 WARN [Thread-1206 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/cluster_032fc1cc-3d9e-2961-e04e-0405007a6b3d/data/data4/current/BP-856716490-172.17.0.2-1731785316016/current, will proceed with Du for space computation calculation, 2024-11-16T19:28:36,509 WARN [Thread-1205 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/cluster_032fc1cc-3d9e-2961-e04e-0405007a6b3d/data/data3/current/BP-856716490-172.17.0.2-1731785316016/current, will proceed with Du for space computation calculation, 2024-11-16T19:28:36,525 WARN [Thread-1194 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T19:28:36,527 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5f887d5d78ac53d5 with lease ID 0x495697860a96bfc3: Processing first storage report for DS-d869cb24-74d8-4a91-af81-79377e1e695b from datanode DatanodeRegistration(127.0.0.1:45929, datanodeUuid=4bc3b464-663d-4517-a754-fe886e518a90, infoPort=33633, infoSecurePort=0, ipcPort=37427, storageInfo=lv=-57;cid=testClusterID;nsid=1143451536;c=1731785316016) 2024-11-16T19:28:36,527 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5f887d5d78ac53d5 with lease ID 0x495697860a96bfc3: from storage DS-d869cb24-74d8-4a91-af81-79377e1e695b node DatanodeRegistration(127.0.0.1:45929, datanodeUuid=4bc3b464-663d-4517-a754-fe886e518a90, infoPort=33633, infoSecurePort=0, ipcPort=37427, storageInfo=lv=-57;cid=testClusterID;nsid=1143451536;c=1731785316016), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:28:36,527 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5f887d5d78ac53d5 with lease ID 0x495697860a96bfc3: Processing first storage report for DS-f499ef4e-ef96-4d43-84e6-8944543751be from datanode DatanodeRegistration(127.0.0.1:45929, datanodeUuid=4bc3b464-663d-4517-a754-fe886e518a90, infoPort=33633, infoSecurePort=0, ipcPort=37427, storageInfo=lv=-57;cid=testClusterID;nsid=1143451536;c=1731785316016) 2024-11-16T19:28:36,527 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5f887d5d78ac53d5 with lease ID 0x495697860a96bfc3: from storage DS-f499ef4e-ef96-4d43-84e6-8944543751be node DatanodeRegistration(127.0.0.1:45929, datanodeUuid=4bc3b464-663d-4517-a754-fe886e518a90, infoPort=33633, infoSecurePort=0, ipcPort=37427, storageInfo=lv=-57;cid=testClusterID;nsid=1143451536;c=1731785316016), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:28:36,571 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34 2024-11-16T19:28:36,575 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/cluster_032fc1cc-3d9e-2961-e04e-0405007a6b3d/zookeeper_0, clientPort=61191, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/cluster_032fc1cc-3d9e-2961-e04e-0405007a6b3d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/cluster_032fc1cc-3d9e-2961-e04e-0405007a6b3d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T19:28:36,577 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61191 2024-11-16T19:28:36,577 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:28:36,579 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:28:36,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37075 is added to blk_1073741825_1001 (size=7) 2024-11-16T19:28:36,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45929 is added to blk_1073741825_1001 (size=7) 2024-11-16T19:28:36,590 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472 with version=8 2024-11-16T19:28:36,590 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/hbase-staging 2024-11-16T19:28:36,592 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d11ab77873cb:0 server-side Connection retries=45 2024-11-16T19:28:36,592 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:28:36,592 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T19:28:36,592 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T19:28:36,592 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:28:36,592 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T19:28:36,592 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T19:28:36,592 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T19:28:36,593 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45925 2024-11-16T19:28:36,594 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45925 connecting to ZooKeeper ensemble=127.0.0.1:61191 2024-11-16T19:28:36,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:459250x0, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T19:28:36,598 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45925-0x1004a02354c0000 connected 2024-11-16T19:28:36,612 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:28:36,613 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:28:36,615 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:28:36,615 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472, hbase.cluster.distributed=false 2024-11-16T19:28:36,616 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T19:28:36,617 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45925 2024-11-16T19:28:36,617 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45925 2024-11-16T19:28:36,618 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45925 2024-11-16T19:28:36,618 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45925 2024-11-16T19:28:36,618 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45925 2024-11-16T19:28:36,633 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d11ab77873cb:0 server-side Connection retries=45 2024-11-16T19:28:36,633 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:28:36,633 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T19:28:36,633 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T19:28:36,633 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:28:36,633 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T19:28:36,633 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T19:28:36,633 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T19:28:36,634 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44973 2024-11-16T19:28:36,635 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44973 connecting to ZooKeeper ensemble=127.0.0.1:61191 2024-11-16T19:28:36,636 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:28:36,637 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:28:36,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:449730x0, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T19:28:36,742 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44973-0x1004a02354c0001 connected 2024-11-16T19:28:36,742 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44973-0x1004a02354c0001, quorum=127.0.0.1:61191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:28:36,743 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T19:28:36,744 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T19:28:36,746 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44973-0x1004a02354c0001, quorum=127.0.0.1:61191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T19:28:36,749 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44973-0x1004a02354c0001, quorum=127.0.0.1:61191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T19:28:36,750 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44973 2024-11-16T19:28:36,751 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44973 2024-11-16T19:28:36,752 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44973 2024-11-16T19:28:36,753 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44973 2024-11-16T19:28:36,753 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44973 2024-11-16T19:28:36,767 DEBUG [M:0;d11ab77873cb:45925 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d11ab77873cb:45925 2024-11-16T19:28:36,767 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d11ab77873cb,45925,1731785316592 2024-11-16T19:28:36,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44973-0x1004a02354c0001, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:28:36,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:28:36,769 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d11ab77873cb,45925,1731785316592 2024-11-16T19:28:36,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:28:36,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44973-0x1004a02354c0001, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T19:28:36,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44973-0x1004a02354c0001, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:28:36,770 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T19:28:36,771 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d11ab77873cb,45925,1731785316592 from backup master directory 2024-11-16T19:28:36,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d11ab77873cb,45925,1731785316592 2024-11-16T19:28:36,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44973-0x1004a02354c0001, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:28:36,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:28:36,772 WARN [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
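
Note on the ZooKeeper traffic above: the master registers itself under /hbase/backup-masters, sets watches on znodes that may not exist yet (/hbase/running, /hbase/master, /hbase/acl), and then reacts to the NodeCreated / NodeDeleted / NodeChildrenChanged events that follow. Below is a minimal, illustrative sketch of that exists-watch pattern using the plain ZooKeeper client; it is not HBase's ZKWatcher/ZKUtil code, and the connect string and timings are placeholders (the test above happens to use 127.0.0.1:61191).

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ExistsWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Placeholder ensemble address; session timeout of 30 s.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();

        // Set a watch on a znode that may not exist yet, analogous to the log's
        // "Set watcher on znode that does not yet exist, /hbase/running" entries.
        zk.exists("/hbase/running", event -> {
          // Fires once, e.g. with type NodeCreated when another client creates the znode.
          System.out.println("event: " + event.getType() + " on " + event.getPath());
        });

        Thread.sleep(60_000); // keep the session alive long enough to observe an event
        zk.close();
      }
    }
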
2024-11-16T19:28:36,772 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d11ab77873cb,45925,1731785316592 2024-11-16T19:28:36,776 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/hbase.id] with ID: dfac9487-d637-4ddf-acf9-3f2fb63b5ea0 2024-11-16T19:28:36,776 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/.tmp/hbase.id 2024-11-16T19:28:36,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37075 is added to blk_1073741826_1002 (size=42) 2024-11-16T19:28:36,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45929 is added to blk_1073741826_1002 (size=42) 2024-11-16T19:28:36,782 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/.tmp/hbase.id]:[hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/hbase.id] 2024-11-16T19:28:36,796 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:28:36,796 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T19:28:36,797 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
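
Note on the hbase.id handling above: FSUtils first writes the cluster ID to a temporary file under .tmp and then moves it to its final name, so a reader never observes a half-written file. A minimal sketch of that write-then-rename pattern with the Hadoop FileSystem API follows; the paths are placeholders and the default Configuration resolves to the local filesystem unless fs.defaultFS points at an HDFS instance, so this is not the test's NameNode at localhost:34363.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteThenRenameSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path finalPath = new Path("/tmp/demo/hbase.id");      // placeholder target
        Path tmpPath   = new Path("/tmp/demo/.tmp/hbase.id"); // placeholder temporary location

        // 1. Write the content to the temporary location (cluster ID taken from the log above).
        try (FSDataOutputStream out = fs.create(tmpPath, true)) {
          out.write("dfac9487-d637-4ddf-acf9-3f2fb63b5ea0".getBytes(StandardCharsets.UTF_8));
        }
        // 2. Move it to the final name; within a single filesystem this is an atomic rename.
        if (!fs.rename(tmpPath, finalPath)) {
          throw new java.io.IOException("rename failed: " + tmpPath + " -> " + finalPath);
        }
      }
    }
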
2024-11-16T19:28:36,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:28:36,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44973-0x1004a02354c0001, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:28:36,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37075 is added to blk_1073741827_1003 (size=196) 2024-11-16T19:28:36,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45929 is added to blk_1073741827_1003 (size=196) 2024-11-16T19:28:36,806 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T19:28:36,807 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T19:28:36,807 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T19:28:36,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45929 is added to blk_1073741828_1004 (size=1189) 2024-11-16T19:28:36,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37075 is added to blk_1073741828_1004 (size=1189) 2024-11-16T19:28:36,815 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store 2024-11-16T19:28:36,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45929 is added to blk_1073741829_1005 (size=34) 2024-11-16T19:28:36,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37075 is added to blk_1073741829_1005 (size=34) 2024-11-16T19:28:36,822 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:28:36,822 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T19:28:36,822 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:28:36,822 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:28:36,822 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T19:28:36,822 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:28:36,822 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
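
Note on the descriptor dumps above: the 'info' family of master:store keeps 3 versions, is pinned in memory, and uses an 8 KB block size, ROW_INDEX_V1 block encoding and a ROWCOL Bloom filter, while 'proc', 'rs' and 'state' keep a single version with 64 KB blocks and a plain ROW Bloom filter. The sketch below shows roughly how comparable families would be declared through the public client API; the table name is a placeholder and this is not the MasterRegion bootstrap code itself.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the 'info' family printed in the log: VERSIONS=3, IN_MEMORY=true,
        // BLOCKSIZE=8192, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOOMFILTER=ROWCOL.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8192)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build();

        // 'proc' (like 'rs' and 'state') differs mainly in VERSIONS=1, BLOCKSIZE=65536,
        // no data block encoding and a ROW Bloom filter.
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBlocksize(65536)
            .setBloomFilterType(BloomType.ROW)
            .build();

        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo", "store")) // placeholder namespace:table
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();
        System.out.println(table);
      }
    }
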
2024-11-16T19:28:36,822 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731785316822Disabling compacts and flushes for region at 1731785316822Disabling writes for close at 1731785316822Writing region close event to WAL at 1731785316822Closed at 1731785316822 2024-11-16T19:28:36,823 WARN [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/.initializing 2024-11-16T19:28:36,823 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/WALs/d11ab77873cb,45925,1731785316592 2024-11-16T19:28:36,825 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d11ab77873cb%2C45925%2C1731785316592, suffix=, logDir=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/WALs/d11ab77873cb,45925,1731785316592, archiveDir=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/oldWALs, maxLogs=10 2024-11-16T19:28:36,826 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C45925%2C1731785316592.1731785316825 2024-11-16T19:28:36,830 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/WALs/d11ab77873cb,45925,1731785316592/d11ab77873cb%2C45925%2C1731785316592.1731785316825 2024-11-16T19:28:36,831 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33633:33633),(127.0.0.1/127.0.0.1:46161:46161)] 2024-11-16T19:28:36,832 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T19:28:36,832 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:28:36,832 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:28:36,832 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:28:36,833 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:28:36,835 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T19:28:36,835 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:28:36,835 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:28:36,835 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:28:36,836 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T19:28:36,836 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:28:36,837 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:28:36,837 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:28:36,838 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T19:28:36,838 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:28:36,838 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:28:36,838 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:28:36,839 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T19:28:36,839 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:28:36,840 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:28:36,840 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:28:36,841 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:28:36,841 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:28:36,842 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:28:36,842 DEBUG [master/d11ab77873cb:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:28:36,843 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T19:28:36,844 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:28:36,847 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T19:28:36,848 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=848103, jitterRate=0.07841897010803223}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T19:28:36,849 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731785316832Initializing all the Stores at 1731785316833 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785316833Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785316833Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785316833Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785316833Cleaning up temporary data from old regions at 1731785316843 (+10 ms)Region opened successfully at 1731785316849 (+6 ms) 2024-11-16T19:28:36,849 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T19:28:36,853 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a3b706f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d11ab77873cb/172.17.0.2:0 2024-11-16T19:28:36,854 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T19:28:36,854 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T19:28:36,855 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T19:28:36,855 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T19:28:36,855 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T19:28:36,856 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T19:28:36,856 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T19:28:36,858 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T19:28:36,859 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T19:28:36,860 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T19:28:36,861 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T19:28:36,861 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T19:28:36,862 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T19:28:36,863 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T19:28:36,864 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T19:28:36,865 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T19:28:36,866 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T19:28:36,867 DEBUG 
[master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T19:28:36,870 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T19:28:36,871 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T19:28:36,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T19:28:36,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44973-0x1004a02354c0001, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T19:28:36,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44973-0x1004a02354c0001, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:28:36,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:28:36,873 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d11ab77873cb,45925,1731785316592, sessionid=0x1004a02354c0000, setting cluster-up flag (Was=false) 2024-11-16T19:28:36,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:28:36,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44973-0x1004a02354c0001, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:28:36,876 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T19:28:36,877 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d11ab77873cb,45925,1731785316592 2024-11-16T19:28:36,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:28:36,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44973-0x1004a02354c0001, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:28:36,882 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T19:28:36,883 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d11ab77873cb,45925,1731785316592 2024-11-16T19:28:36,884 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T19:28:36,885 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T19:28:36,885 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T19:28:36,886 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T19:28:36,886 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d11ab77873cb,45925,1731785316592 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T19:28:36,887 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:28:36,887 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:28:36,887 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:28:36,887 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:28:36,887 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d11ab77873cb:0, corePoolSize=10, maxPoolSize=10 2024-11-16T19:28:36,887 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:28:36,887 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d11ab77873cb:0, corePoolSize=2, maxPoolSize=2 2024-11-16T19:28:36,888 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d11ab77873cb:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T19:28:36,888 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731785346888 2024-11-16T19:28:36,888 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T19:28:36,889 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T19:28:36,889 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T19:28:36,889 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T19:28:36,889 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T19:28:36,889 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T19:28:36,889 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T19:28:36,889 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T19:28:36,890 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:28:36,890 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T19:28:36,893 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T19:28:36,893 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T19:28:36,894 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T19:28:36,894 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T19:28:36,894 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T19:28:36,894 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T19:28:36,894 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.large.0-1731785316894,5,FailOnTimeoutGroup] 2024-11-16T19:28:36,894 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.small.0-1731785316894,5,FailOnTimeoutGroup] 2024-11-16T19:28:36,894 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T19:28:36,894 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T19:28:36,894 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T19:28:36,894 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
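
Note on the chore registrations above (LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, SnapshotCleaner every 1800000 ms): each is a ScheduledChore driven by a ChoreService. The sketch below shows a minimal custom chore; the constructor and scheduling calls are written from memory of the public ScheduledChore/ChoreService API and should be checked against the HBase version in use, and the name and period are invented for illustration.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws Exception {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };

        // Runs every 5 seconds; the cleaner chores in the log use much longer periods.
        ScheduledChore demoChore = new ScheduledChore("DemoChore", stopper, 5000) {
          @Override
          protected void chore() {
            System.out.println("chore tick");
          }
        };

        ChoreService service = new ChoreService("demo");
        service.scheduleChore(demoChore);

        Thread.sleep(20_000);
        service.shutdown();
      }
    }
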
2024-11-16T19:28:36,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45929 is added to blk_1073741831_1007 (size=1321) 2024-11-16T19:28:36,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37075 is added to blk_1073741831_1007 (size=1321) 2024-11-16T19:28:36,899 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T19:28:36,899 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472 2024-11-16T19:28:36,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37075 is added to blk_1073741832_1008 (size=32) 2024-11-16T19:28:36,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45929 is added to blk_1073741832_1008 (size=32) 2024-11-16T19:28:36,906 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:28:36,907 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T19:28:36,909 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T19:28:36,909 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:28:36,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:28:36,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T19:28:36,911 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T19:28:36,911 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:28:36,913 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:28:36,913 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T19:28:36,915 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T19:28:36,915 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:28:36,916 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:28:36,916 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T19:28:36,918 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T19:28:36,918 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:28:36,918 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:28:36,918 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T19:28:36,919 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/hbase/meta/1588230740 2024-11-16T19:28:36,919 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/hbase/meta/1588230740 2024-11-16T19:28:36,921 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T19:28:36,921 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T19:28:36,921 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
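The CompactionConfiguration dump repeated above for each column family (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms with 0.5 jitter) is derived from ordinary site configuration. Below is a minimal Java sketch, not taken from this test run, of the configuration keys that feed those values; the key names are the ones I believe current HBase releases use and the values simply mirror the log, so treat both as illustrative rather than authoritative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    class CompactionTuningSketch {
      static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Minimum/maximum number of store files per minor compaction
        // (logged as minFilesToCompact:3, maxFilesToCompact:10).
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Selection ratios (logged as ratio 1.200000 and off-peak ratio 5.000000).
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        // Major compaction period and jitter (7 days, 0.5), as in the log above.
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        return conf;
      }
    }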
2024-11-16T19:28:36,922 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T19:28:36,925 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T19:28:36,925 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=832753, jitterRate=0.058900654315948486}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T19:28:36,926 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731785316906Initializing all the Stores at 1731785316907 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785316907Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785316907Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785316907Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785316907Cleaning up temporary data from old regions at 1731785316921 (+14 ms)Region opened successfully at 1731785316926 (+5 ms) 2024-11-16T19:28:36,926 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T19:28:36,926 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T19:28:36,926 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T19:28:36,926 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T19:28:36,926 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T19:28:36,927 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T19:28:36,927 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731785316926Disabling compacts and flushes for region at 1731785316926Disabling writes for close at 1731785316926Writing region 
close event to WAL at 1731785316927 (+1 ms)Closed at 1731785316927 2024-11-16T19:28:36,928 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T19:28:36,928 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T19:28:36,928 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T19:28:36,930 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T19:28:36,931 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T19:28:36,955 INFO [RS:0;d11ab77873cb:44973 {}] regionserver.HRegionServer(746): ClusterId : dfac9487-d637-4ddf-acf9-3f2fb63b5ea0 2024-11-16T19:28:36,956 DEBUG [RS:0;d11ab77873cb:44973 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T19:28:36,957 DEBUG [RS:0;d11ab77873cb:44973 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T19:28:36,957 DEBUG [RS:0;d11ab77873cb:44973 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T19:28:36,959 DEBUG [RS:0;d11ab77873cb:44973 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T19:28:36,959 DEBUG [RS:0;d11ab77873cb:44973 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37b68f94, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d11ab77873cb/172.17.0.2:0 2024-11-16T19:28:36,970 DEBUG [RS:0;d11ab77873cb:44973 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d11ab77873cb:44973 2024-11-16T19:28:36,971 INFO [RS:0;d11ab77873cb:44973 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T19:28:36,971 INFO [RS:0;d11ab77873cb:44973 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T19:28:36,971 DEBUG [RS:0;d11ab77873cb:44973 {}] regionserver.HRegionServer(832): About to register with Master. 
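The split-policy summary logged when 1588230740 was opened above (SteppingSplitPolicy wrapping IncreasingToUpperBoundRegionSplitPolicy and ConstantSizeRegionSplitPolicy with desiredMaxFileSize=832753 and a small jitterRate) is consistent with a deliberately tiny hbase.hregion.max.filesize of 786432 bytes plus jitter, matching the TableDescriptorChecker warnings later in this log. A hedged sketch of how a test harness might set that up follows; the values are read off this log and are not recommended production settings.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    class SmallRegionConfSketch {
      static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Tiny region size and flush size to provoke frequent flushes and rolls in a test;
        // 786432 bytes plus jitter is roughly the desiredMaxFileSize=832753 logged above.
        conf.setLong("hbase.hregion.max.filesize", 786432L);
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L);
        // Split policy implementation (SteppingSplitPolicy is what the log reports).
        conf.set("hbase.regionserver.region.split.policy",
            "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");
        return conf;
      }
    }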
2024-11-16T19:28:36,972 INFO [RS:0;d11ab77873cb:44973 {}] regionserver.HRegionServer(2659): reportForDuty to master=d11ab77873cb,45925,1731785316592 with port=44973, startcode=1731785316633 2024-11-16T19:28:36,972 DEBUG [RS:0;d11ab77873cb:44973 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T19:28:36,974 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52201, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T19:28:36,974 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45925 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d11ab77873cb,44973,1731785316633 2024-11-16T19:28:36,975 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45925 {}] master.ServerManager(517): Registering regionserver=d11ab77873cb,44973,1731785316633 2024-11-16T19:28:36,976 DEBUG [RS:0;d11ab77873cb:44973 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472 2024-11-16T19:28:36,976 DEBUG [RS:0;d11ab77873cb:44973 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34363 2024-11-16T19:28:36,976 DEBUG [RS:0;d11ab77873cb:44973 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T19:28:36,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T19:28:36,978 DEBUG [RS:0;d11ab77873cb:44973 {}] zookeeper.ZKUtil(111): regionserver:44973-0x1004a02354c0001, quorum=127.0.0.1:61191, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d11ab77873cb,44973,1731785316633 2024-11-16T19:28:36,978 WARN [RS:0;d11ab77873cb:44973 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T19:28:36,978 INFO [RS:0;d11ab77873cb:44973 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T19:28:36,979 DEBUG [RS:0;d11ab77873cb:44973 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633 2024-11-16T19:28:36,979 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d11ab77873cb,44973,1731785316633] 2024-11-16T19:28:36,982 INFO [RS:0;d11ab77873cb:44973 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T19:28:36,984 INFO [RS:0;d11ab77873cb:44973 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T19:28:36,984 INFO [RS:0;d11ab77873cb:44973 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T19:28:36,984 INFO [RS:0;d11ab77873cb:44973 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
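The MemStoreFlusher line above (globalMemStoreLimit=880 M with an 836 M low-water mark, roughly 95%) and the PressureAwareCompactionThroughputController bounds (100 MB/s upper, 50 MB/s lower) map onto a handful of RegionServer settings. A sketch follows; the key names are quoted from memory of the HBase defaults and should be verified against the 3.0.0-beta-2 codebase before relying on them.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    class MemstoreAndThroughputSketch {
      static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the RegionServer heap usable by all memstores, and the
        // low-water mark as a fraction of that limit (880 MB vs 836 MB ~ 95%).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // Pressure-aware compaction throughput bounds (logged 100 MB/s and 50 MB/s).
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        return conf;
      }
    }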
2024-11-16T19:28:36,984 INFO [RS:0;d11ab77873cb:44973 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T19:28:36,985 INFO [RS:0;d11ab77873cb:44973 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T19:28:36,985 INFO [RS:0;d11ab77873cb:44973 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T19:28:36,985 DEBUG [RS:0;d11ab77873cb:44973 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:28:36,986 DEBUG [RS:0;d11ab77873cb:44973 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:28:36,986 DEBUG [RS:0;d11ab77873cb:44973 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:28:36,986 DEBUG [RS:0;d11ab77873cb:44973 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:28:36,986 DEBUG [RS:0;d11ab77873cb:44973 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:28:36,986 DEBUG [RS:0;d11ab77873cb:44973 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d11ab77873cb:0, corePoolSize=2, maxPoolSize=2 2024-11-16T19:28:36,986 DEBUG [RS:0;d11ab77873cb:44973 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:28:36,986 DEBUG [RS:0;d11ab77873cb:44973 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:28:36,986 DEBUG [RS:0;d11ab77873cb:44973 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:28:36,986 DEBUG [RS:0;d11ab77873cb:44973 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:28:36,986 DEBUG [RS:0;d11ab77873cb:44973 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:28:36,986 DEBUG [RS:0;d11ab77873cb:44973 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:28:36,986 DEBUG [RS:0;d11ab77873cb:44973 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d11ab77873cb:0, corePoolSize=3, maxPoolSize=3 2024-11-16T19:28:36,986 DEBUG [RS:0;d11ab77873cb:44973 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0, corePoolSize=3, maxPoolSize=3 2024-11-16T19:28:36,989 INFO [RS:0;d11ab77873cb:44973 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
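The chores being enabled above and below (CompactionChecker, MemstoreFlusherChore, ExecutorStatusChore, and so on) are all ScheduledChore instances driven by the server's ChoreService. As a rough illustration only, and noting that ScheduledChore and ChoreService are internal HBase classes whose exact signatures may differ between releases, a custom chore could look like this:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    // A trivial chore that fires every 60 seconds, mirroring how the RegionServer
    // schedules CompactionChecker, MemstoreFlusherChore, etc. above.
    class HeartbeatChore extends ScheduledChore {
      HeartbeatChore(Stoppable stopper) {
        super("HeartbeatChore", stopper, 60_000);
      }

      @Override
      protected void chore() {
        System.out.println("heartbeat at " + System.currentTimeMillis());
      }
    }

    // Usage sketch: new ChoreService("demo").scheduleChore(new HeartbeatChore(stopper));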
2024-11-16T19:28:36,989 INFO [RS:0;d11ab77873cb:44973 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T19:28:36,989 INFO [RS:0;d11ab77873cb:44973 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:28:36,990 INFO [RS:0;d11ab77873cb:44973 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T19:28:36,990 INFO [RS:0;d11ab77873cb:44973 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T19:28:36,990 INFO [RS:0;d11ab77873cb:44973 {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,44973,1731785316633-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T19:28:37,003 INFO [RS:0;d11ab77873cb:44973 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T19:28:37,004 INFO [RS:0;d11ab77873cb:44973 {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,44973,1731785316633-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:28:37,004 INFO [RS:0;d11ab77873cb:44973 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:28:37,004 INFO [RS:0;d11ab77873cb:44973 {}] regionserver.Replication(171): d11ab77873cb,44973,1731785316633 started 2024-11-16T19:28:37,017 INFO [RS:0;d11ab77873cb:44973 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:28:37,017 INFO [RS:0;d11ab77873cb:44973 {}] regionserver.HRegionServer(1482): Serving as d11ab77873cb,44973,1731785316633, RpcServer on d11ab77873cb/172.17.0.2:44973, sessionid=0x1004a02354c0001 2024-11-16T19:28:37,017 DEBUG [RS:0;d11ab77873cb:44973 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T19:28:37,017 DEBUG [RS:0;d11ab77873cb:44973 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d11ab77873cb,44973,1731785316633 2024-11-16T19:28:37,017 DEBUG [RS:0;d11ab77873cb:44973 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd11ab77873cb,44973,1731785316633' 2024-11-16T19:28:37,017 DEBUG [RS:0;d11ab77873cb:44973 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T19:28:37,018 DEBUG [RS:0;d11ab77873cb:44973 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T19:28:37,019 DEBUG [RS:0;d11ab77873cb:44973 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T19:28:37,019 DEBUG [RS:0;d11ab77873cb:44973 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T19:28:37,019 DEBUG [RS:0;d11ab77873cb:44973 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d11ab77873cb,44973,1731785316633 2024-11-16T19:28:37,019 DEBUG [RS:0;d11ab77873cb:44973 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd11ab77873cb,44973,1731785316633' 2024-11-16T19:28:37,019 DEBUG [RS:0;d11ab77873cb:44973 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T19:28:37,019 DEBUG 
[RS:0;d11ab77873cb:44973 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T19:28:37,019 DEBUG [RS:0;d11ab77873cb:44973 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T19:28:37,019 INFO [RS:0;d11ab77873cb:44973 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T19:28:37,019 INFO [RS:0;d11ab77873cb:44973 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T19:28:37,081 WARN [d11ab77873cb:45925 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-16T19:28:37,124 INFO [RS:0;d11ab77873cb:44973 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d11ab77873cb%2C44973%2C1731785316633, suffix=, logDir=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633, archiveDir=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/oldWALs, maxLogs=32 2024-11-16T19:28:37,126 INFO [RS:0;d11ab77873cb:44973 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C44973%2C1731785316633.1731785317125 2024-11-16T19:28:37,136 INFO [RS:0;d11ab77873cb:44973 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785317125 2024-11-16T19:28:37,137 DEBUG [RS:0;d11ab77873cb:44973 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33633:33633),(127.0.0.1/127.0.0.1:46161:46161)] 2024-11-16T19:28:37,332 DEBUG [d11ab77873cb:45925 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T19:28:37,333 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d11ab77873cb,44973,1731785316633 2024-11-16T19:28:37,337 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d11ab77873cb,44973,1731785316633, state=OPENING 2024-11-16T19:28:37,340 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T19:28:37,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:28:37,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44973-0x1004a02354c0001, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:28:37,344 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T19:28:37,344 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:28:37,344 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:28:37,344 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=d11ab77873cb,44973,1731785316633}]
2024-11-16T19:28:37,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-16T19:28:37,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-16T19:28:37,498 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-16T19:28:37,503 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37003, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-16T19:28:37,509 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-16T19:28:37,509 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-16T19:28:37,511 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d11ab77873cb%2C44973%2C1731785316633.meta, suffix=.meta, logDir=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633, archiveDir=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/oldWALs, maxLogs=32
2024-11-16T19:28:37,512 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C44973%2C1731785316633.meta.1731785317511.meta
2024-11-16T19:28:37,516 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.meta.1731785317511.meta
2024-11-16T19:28:37,517 DEBUG
[RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46161:46161),(127.0.0.1/127.0.0.1:33633:33633)] 2024-11-16T19:28:37,518 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T19:28:37,518 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T19:28:37,518 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T19:28:37,518 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-16T19:28:37,518 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T19:28:37,519 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:28:37,519 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T19:28:37,519 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T19:28:37,520 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T19:28:37,521 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T19:28:37,521 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:28:37,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:28:37,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T19:28:37,523 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T19:28:37,523 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:28:37,523 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:28:37,523 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T19:28:37,524 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T19:28:37,524 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:28:37,525 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:28:37,525 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T19:28:37,526 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T19:28:37,526 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:28:37,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:28:37,527 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T19:28:37,528 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/hbase/meta/1588230740 2024-11-16T19:28:37,529 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/hbase/meta/1588230740 2024-11-16T19:28:37,531 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T19:28:37,531 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T19:28:37,532 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
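Both the RegionServer WAL and the meta WAL above are created through FSHLogProvider with blocksize=256 MB, rollsize=128 MB and maxLogs=32. A minimal sketch of the settings behind those numbers follows; note that 256 MB is simply twice the mini cluster's 128 MB HDFS block size, and that the key names, while standard, should be double-checked against the version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    class WalConfSketch {
      static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // "filesystem" selects FSHLogProvider (the provider instantiated above);
        // "asyncfs" would select the asynchronous WAL implementation instead.
        conf.set("hbase.wal.provider", "filesystem");
        // WAL block size and the roll threshold as a fraction of it
        // (256 MB * 0.5 = the 128 MB rollsize in the log).
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Maximum number of un-archived WAL files before flushes are forced.
        conf.setInt("hbase.regionserver.maxlogs", 32);
        return conf;
      }
    }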
2024-11-16T19:28:37,533 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T19:28:37,534 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=798115, jitterRate=0.014856308698654175}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T19:28:37,534 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T19:28:37,535 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731785317519Writing region info on filesystem at 1731785317519Initializing all the Stores at 1731785317520 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785317520Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785317520Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785317520Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785317520Cleaning up temporary data from old regions at 1731785317531 (+11 ms)Running coprocessor post-open hooks at 1731785317534 (+3 ms)Region opened successfully at 1731785317535 (+1 ms) 2024-11-16T19:28:37,536 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731785317497 2024-11-16T19:28:37,538 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T19:28:37,538 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T19:28:37,539 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=d11ab77873cb,44973,1731785316633 2024-11-16T19:28:37,539 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d11ab77873cb,44973,1731785316633, state=OPEN 2024-11-16T19:28:37,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T19:28:37,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44973-0x1004a02354c0001, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T19:28:37,542 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d11ab77873cb,44973,1731785316633 2024-11-16T19:28:37,542 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:28:37,542 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:28:37,544 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T19:28:37,544 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d11ab77873cb,44973,1731785316633 in 198 msec 2024-11-16T19:28:37,546 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T19:28:37,546 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 616 msec 2024-11-16T19:28:37,547 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T19:28:37,548 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T19:28:37,549 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T19:28:37,549 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d11ab77873cb,44973,1731785316633, seqNum=-1] 2024-11-16T19:28:37,549 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T19:28:37,550 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46625, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T19:28:37,556 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 670 msec 2024-11-16T19:28:37,556 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731785317556, completionTime=-1 2024-11-16T19:28:37,556 INFO 
[master/d11ab77873cb:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T19:28:37,557 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T19:28:37,559 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T19:28:37,559 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731785377559 2024-11-16T19:28:37,559 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731785437559 2024-11-16T19:28:37,559 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-16T19:28:37,559 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,45925,1731785316592-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:28:37,559 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,45925,1731785316592-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:28:37,559 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,45925,1731785316592-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:28:37,559 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d11ab77873cb:45925, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:28:37,559 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T19:28:37,560 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T19:28:37,561 DEBUG [master/d11ab77873cb:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T19:28:37,563 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.791sec 2024-11-16T19:28:37,563 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T19:28:37,563 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T19:28:37,564 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T19:28:37,564 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
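Once the master logs "Master has completed initialization" and the AssignmentManager has joined the cluster, a client can confirm the cluster shape through the Admin API. The following is only an orientation sketch using the public ConnectionFactory/Admin client, not code from this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    class ClusterCheckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Mirrors the master's "Finished waiting on RegionServer count=1" step:
          // one live RegionServer should be reported for this mini cluster.
          System.out.println("live regionservers: "
              + admin.getClusterMetrics().getLiveServerMetrics().size());
        }
      }
    }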
2024-11-16T19:28:37,564 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T19:28:37,564 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,45925,1731785316592-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T19:28:37,564 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,45925,1731785316592-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T19:28:37,567 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T19:28:37,567 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T19:28:37,567 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,45925,1731785316592-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:28:37,658 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40375d59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T19:28:37,658 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d11ab77873cb,45925,-1 for getting cluster id 2024-11-16T19:28:37,659 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T19:28:37,662 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dfac9487-d637-4ddf-acf9-3f2fb63b5ea0' 2024-11-16T19:28:37,662 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T19:28:37,662 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dfac9487-d637-4ddf-acf9-3f2fb63b5ea0" 2024-11-16T19:28:37,663 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66077482, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T19:28:37,663 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d11ab77873cb,45925,-1] 2024-11-16T19:28:37,663 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T19:28:37,663 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:28:37,665 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56160, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T19:28:37,666 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6144f613, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T19:28:37,666 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T19:28:37,667 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d11ab77873cb,44973,1731785316633, seqNum=-1] 2024-11-16T19:28:37,668 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T19:28:37,669 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58984, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T19:28:37,672 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=d11ab77873cb,45925,1731785316592 2024-11-16T19:28:37,672 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:28:37,675 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T19:28:37,675 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-16T19:28:37,675 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-16T19:28:37,676 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T19:28:37,677 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is d11ab77873cb,45925,1731785316592 2024-11-16T19:28:37,677 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@21ad8a89 2024-11-16T19:28:37,677 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T19:28:37,680 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56164, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T19:28:37,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45925 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T19:28:37,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45925 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
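The "set balanceSwitch=false" entry above records the test disabling the load balancer before TestLogRolling#testLogRollOnPipelineRestart starts, so that region moves cannot interfere with the WAL pipeline being exercised. A hedged client-side equivalent (again not the test's own code; the Configuration passed in is assumed to point at the mini cluster) would be:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    class BalancerOffSketch {
      static void disableBalancer(Configuration conf) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Second argument asks for a synchronous switch; the return value is
          // the previous balancer state.
          boolean wasOn = admin.balancerSwitch(false, true);
          System.out.println("balancer previously " + (wasOn ? "enabled" : "disabled"));
        }
      }
    }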
2024-11-16T19:28:37,681 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45925 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T19:28:37,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45925 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-16T19:28:37,685 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T19:28:37,685 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:28:37,685 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45925 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-16T19:28:37,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45925 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T19:28:37,686 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T19:28:37,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45929 is added to blk_1073741835_1011 (size=395) 2024-11-16T19:28:37,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37075 is added to blk_1073741835_1011 (size=395) 2024-11-16T19:28:38,099 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 708895c41838008cb4f83bd2c991c364, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472 2024-11-16T19:28:38,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45929 is added to blk_1073741836_1012 (size=78) 2024-11-16T19:28:38,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37075 is added to blk_1073741836_1012 (size=78) 2024-11-16T19:28:38,109 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:28:38,109 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 708895c41838008cb4f83bd2c991c364, disabling compactions & flushes 2024-11-16T19:28:38,109 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364. 2024-11-16T19:28:38,109 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364. 2024-11-16T19:28:38,109 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364. after waiting 0 ms 2024-11-16T19:28:38,109 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364. 2024-11-16T19:28:38,110 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364. 2024-11-16T19:28:38,110 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 708895c41838008cb4f83bd2c991c364: Waiting for close lock at 1731785318109Disabling compacts and flushes for region at 1731785318109Disabling writes for close at 1731785318109Writing region close event to WAL at 1731785318109Closed at 1731785318109 2024-11-16T19:28:38,111 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T19:28:38,112 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731785318111"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731785318111"}]},"ts":"1731785318111"} 2024-11-16T19:28:38,114 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-16T19:28:38,116 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T19:28:38,116 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731785318116"}]},"ts":"1731785318116"} 2024-11-16T19:28:38,119 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-16T19:28:38,119 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=708895c41838008cb4f83bd2c991c364, ASSIGN}] 2024-11-16T19:28:38,121 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=708895c41838008cb4f83bd2c991c364, ASSIGN 2024-11-16T19:28:38,122 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=708895c41838008cb4f83bd2c991c364, ASSIGN; state=OFFLINE, location=d11ab77873cb,44973,1731785316633; forceNewPlan=false, retain=false 2024-11-16T19:28:38,273 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=708895c41838008cb4f83bd2c991c364, regionState=OPENING, regionLocation=d11ab77873cb,44973,1731785316633 2024-11-16T19:28:38,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=708895c41838008cb4f83bd2c991c364, ASSIGN because future has completed 2024-11-16T19:28:38,275 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 708895c41838008cb4f83bd2c991c364, server=d11ab77873cb,44973,1731785316633}] 2024-11-16T19:28:38,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:38,438 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364. 
2024-11-16T19:28:38,438 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 708895c41838008cb4f83bd2c991c364, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364.', STARTKEY => '', ENDKEY => ''} 2024-11-16T19:28:38,439 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 708895c41838008cb4f83bd2c991c364 2024-11-16T19:28:38,439 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:28:38,439 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 708895c41838008cb4f83bd2c991c364 2024-11-16T19:28:38,439 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 708895c41838008cb4f83bd2c991c364 2024-11-16T19:28:38,442 INFO [StoreOpener-708895c41838008cb4f83bd2c991c364-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 708895c41838008cb4f83bd2c991c364 2024-11-16T19:28:38,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:38,444 INFO [StoreOpener-708895c41838008cb4f83bd2c991c364-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 708895c41838008cb4f83bd2c991c364 columnFamilyName info 2024-11-16T19:28:38,444 DEBUG [StoreOpener-708895c41838008cb4f83bd2c991c364-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:28:38,445 INFO [StoreOpener-708895c41838008cb4f83bd2c991c364-1 {}] regionserver.HStore(327): Store=708895c41838008cb4f83bd2c991c364/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:28:38,445 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 708895c41838008cb4f83bd2c991c364 2024-11-16T19:28:38,446 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/default/TestLogRolling-testLogRollOnPipelineRestart/708895c41838008cb4f83bd2c991c364 2024-11-16T19:28:38,447 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/default/TestLogRolling-testLogRollOnPipelineRestart/708895c41838008cb4f83bd2c991c364 2024-11-16T19:28:38,448 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 708895c41838008cb4f83bd2c991c364 2024-11-16T19:28:38,448 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 708895c41838008cb4f83bd2c991c364 2024-11-16T19:28:38,450 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 708895c41838008cb4f83bd2c991c364 2024-11-16T19:28:38,453 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/default/TestLogRolling-testLogRollOnPipelineRestart/708895c41838008cb4f83bd2c991c364/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T19:28:38,454 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 708895c41838008cb4f83bd2c991c364; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=844080, jitterRate=0.07330352067947388}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T19:28:38,454 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 708895c41838008cb4f83bd2c991c364 2024-11-16T19:28:38,454 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T19:28:38,455 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 708895c41838008cb4f83bd2c991c364: Running coprocessor pre-open hook at 1731785318439Writing region info on filesystem at 1731785318439Initializing all the Stores at 1731785318441 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785318441Cleaning up temporary data from old regions at 1731785318448 (+7 ms)Running coprocessor post-open hooks at 1731785318454 (+6 ms)Region opened successfully at 1731785318455 (+1 ms) 2024-11-16T19:28:38,456 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364., pid=6, masterSystemTime=1731785318429 2024-11-16T19:28:38,458 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364. 2024-11-16T19:28:38,459 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364. 
2024-11-16T19:28:38,460 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=708895c41838008cb4f83bd2c991c364, regionState=OPEN, openSeqNum=2, regionLocation=d11ab77873cb,44973,1731785316633 2024-11-16T19:28:38,462 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 708895c41838008cb4f83bd2c991c364, server=d11ab77873cb,44973,1731785316633 because future has completed 2024-11-16T19:28:38,466 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T19:28:38,466 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 708895c41838008cb4f83bd2c991c364, server=d11ab77873cb,44973,1731785316633 in 189 msec 2024-11-16T19:28:38,469 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T19:28:38,469 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=708895c41838008cb4f83bd2c991c364, ASSIGN in 347 msec 2024-11-16T19:28:38,470 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T19:28:38,470 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731785318470"}]},"ts":"1731785318470"} 2024-11-16T19:28:38,472 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-16T19:28:38,473 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T19:28:38,475 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 792 msec 2024-11-16T19:28:39,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:39,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:40,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:40,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:41,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:41,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:42,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:42,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:43,024 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T19:28:43,046 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:43,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:43,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:43,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:43,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:43,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:43,051 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:43,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:43,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:43,054 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:28:43,059 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T19:28:43,059 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-16T19:28:43,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:43,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:44,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:44,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:45,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:45,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:46,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:46,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:47,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:47,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:47,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45925 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T19:28:47,730 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-16T19:28:47,730 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-16T19:28:47,738 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-16T19:28:47,738 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364. 2024-11-16T19:28:47,743 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364., hostname=d11ab77873cb,44973,1731785316633, seqNum=2] 2024-11-16T19:28:48,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:48,454 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-16T19:28:48,455 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-16T19:28:48,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:49,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:28:49,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:49,746 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785317125 2024-11-16T19:28:49,747 WARN [ResponseProcessor for block BP-856716490-172.17.0.2-1731785316016:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-856716490-172.17.0.2-1731785316016:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T19:28:49,747 WARN [ResponseProcessor for block BP-856716490-172.17.0.2-1731785316016:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-856716490-172.17.0.2-1731785316016:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:49,747 WARN [ResponseProcessor for block BP-856716490-172.17.0.2-1731785316016:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-856716490-172.17.0.2-1731785316016:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-856716490-172.17.0.2-1731785316016:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:45929,DS-d869cb24-74d8-4a91-af81-79377e1e695b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:49,747 WARN [DataStreamer for file /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785317125 block BP-856716490-172.17.0.2-1731785316016:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-856716490-172.17.0.2-1731785316016:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45929,DS-d869cb24-74d8-4a91-af81-79377e1e695b,DISK], DatanodeInfoWithStorage[127.0.0.1:37075,DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45929,DS-d869cb24-74d8-4a91-af81-79377e1e695b,DISK]) is bad. 2024-11-16T19:28:49,747 WARN [DataStreamer for file /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/WALs/d11ab77873cb,45925,1731785316592/d11ab77873cb%2C45925%2C1731785316592.1731785316825 block BP-856716490-172.17.0.2-1731785316016:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-856716490-172.17.0.2-1731785316016:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45929,DS-d869cb24-74d8-4a91-af81-79377e1e695b,DISK], DatanodeInfoWithStorage[127.0.0.1:37075,DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45929,DS-d869cb24-74d8-4a91-af81-79377e1e695b,DISK]) is bad. 2024-11-16T19:28:49,747 WARN [DataStreamer for file /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.meta.1731785317511.meta block BP-856716490-172.17.0.2-1731785316016:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-856716490-172.17.0.2-1731785316016:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37075,DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512,DISK], DatanodeInfoWithStorage[127.0.0.1:45929,DS-d869cb24-74d8-4a91-af81-79377e1e695b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45929,DS-d869cb24-74d8-4a91-af81-79377e1e695b,DISK]) is bad. 
2024-11-16T19:28:49,747 WARN [PacketResponder: BP-856716490-172.17.0.2-1731785316016:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:45929] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:49,748 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1773942762_22 at /127.0.0.1:40214 [Receiving block BP-856716490-172.17.0.2-1731785316016:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37075:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40214 dst: /127.0.0.1:37075 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T19:28:49,748 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1773942762_22 at /127.0.0.1:50802 [Receiving block BP-856716490-172.17.0.2-1731785316016:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45929:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50802 dst: /127.0.0.1:45929 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:49,748 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1167945216_22 at /127.0.0.1:50776 [Receiving block BP-856716490-172.17.0.2-1731785316016:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45929:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50776 dst: /127.0.0.1:45929 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:49,749 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1167945216_22 at /127.0.0.1:40178 [Receiving block BP-856716490-172.17.0.2-1731785316016:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37075:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40178 dst: /127.0.0.1:37075 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T19:28:49,749 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1773942762_22 at /127.0.0.1:50818 [Receiving block BP-856716490-172.17.0.2-1731785316016:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45929:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50818 dst: /127.0.0.1:45929 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:49,749 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1773942762_22 at /127.0.0.1:40200 [Receiving block BP-856716490-172.17.0.2-1731785316016:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37075:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40200 dst: /127.0.0.1:37075 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:49,752 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@140caf6f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:28:49,753 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1da660ce{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:28:49,753 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:28:49,753 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3fd7563{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:28:49,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11255fea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/hadoop.log.dir/,STOPPED} 2024-11-16T19:28:49,755 WARN [BP-856716490-172.17.0.2-1731785316016 heartbeating to localhost/127.0.0.1:34363 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T19:28:49,755 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T19:28:49,755 WARN [BP-856716490-172.17.0.2-1731785316016 heartbeating to localhost/127.0.0.1:34363 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-856716490-172.17.0.2-1731785316016 (Datanode Uuid 4bc3b464-663d-4517-a754-fe886e518a90) service to localhost/127.0.0.1:34363 2024-11-16T19:28:49,755 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T19:28:49,756 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/cluster_032fc1cc-3d9e-2961-e04e-0405007a6b3d/data/data3/current/BP-856716490-172.17.0.2-1731785316016 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:28:49,756 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/cluster_032fc1cc-3d9e-2961-e04e-0405007a6b3d/data/data4/current/BP-856716490-172.17.0.2-1731785316016 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:28:49,756 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T19:28:49,764 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:28:49,767 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:28:49,768 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:28:49,768 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:28:49,768 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T19:28:49,769 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5960bf29{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:28:49,769 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c07ac8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:28:49,865 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6a4031cc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/java.io.tmpdir/jetty-localhost-36357-hadoop-hdfs-3_4_1-tests_jar-_-any-12369082130830063681/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:28:49,865 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@126bd190{HTTP/1.1, 
(http/1.1)}{localhost:36357} 2024-11-16T19:28:49,865 INFO [Time-limited test {}] server.Server(415): Started @156519ms 2024-11-16T19:28:49,866 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T19:28:49,885 WARN [ResponseProcessor for block BP-856716490-172.17.0.2-1731785316016:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-856716490-172.17.0.2-1731785316016:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:49,885 WARN [ResponseProcessor for block BP-856716490-172.17.0.2-1731785316016:blk_1073741834_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-856716490-172.17.0.2-1731785316016:blk_1073741834_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:49,885 WARN [ResponseProcessor for block BP-856716490-172.17.0.2-1731785316016:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-856716490-172.17.0.2-1731785316016:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:49,886 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1773942762_22 at /127.0.0.1:39690 [Receiving block BP-856716490-172.17.0.2-1731785316016:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37075:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39690 dst: /127.0.0.1:37075 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:49,886 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1773942762_22 at /127.0.0.1:39708 [Receiving block BP-856716490-172.17.0.2-1731785316016:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37075:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39708 dst: /127.0.0.1:37075 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:49,886 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1167945216_22 at /127.0.0.1:39688 [Receiving block BP-856716490-172.17.0.2-1731785316016:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37075:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39688 dst: /127.0.0.1:37075 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T19:28:49,893 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6f0827f7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:28:49,893 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2744dc92{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:28:49,893 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:28:49,893 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@165d0fad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:28:49,893 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25f63c50{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/hadoop.log.dir/,STOPPED} 2024-11-16T19:28:49,894 WARN [BP-856716490-172.17.0.2-1731785316016 heartbeating to localhost/127.0.0.1:34363 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T19:28:49,894 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T19:28:49,894 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T19:28:49,894 WARN [BP-856716490-172.17.0.2-1731785316016 heartbeating to localhost/127.0.0.1:34363 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-856716490-172.17.0.2-1731785316016 (Datanode Uuid 1fc598ff-773e-473c-adca-22e7b3ecc203) service to localhost/127.0.0.1:34363 2024-11-16T19:28:49,895 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/cluster_032fc1cc-3d9e-2961-e04e-0405007a6b3d/data/data1/current/BP-856716490-172.17.0.2-1731785316016 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:28:49,895 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/cluster_032fc1cc-3d9e-2961-e04e-0405007a6b3d/data/data2/current/BP-856716490-172.17.0.2-1731785316016 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:28:49,895 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T19:28:49,902 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:28:49,905 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:28:49,905 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:28:49,906 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:28:49,906 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T19:28:49,906 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@176f5faa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:28:49,906 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3237c5f1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:28:49,941 WARN [Thread-1329 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T19:28:49,944 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1f6170ff861d9b7a with lease ID 0x495697860a96bfc4: from storage DS-d869cb24-74d8-4a91-af81-79377e1e695b node DatanodeRegistration(127.0.0.1:39489, datanodeUuid=4bc3b464-663d-4517-a754-fe886e518a90, infoPort=38997, infoSecurePort=0, ipcPort=37931, storageInfo=lv=-57;cid=testClusterID;nsid=1143451536;c=1731785316016), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:28:49,945 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1f6170ff861d9b7a with lease ID 0x495697860a96bfc4: from storage DS-f499ef4e-ef96-4d43-84e6-8944543751be node DatanodeRegistration(127.0.0.1:39489, datanodeUuid=4bc3b464-663d-4517-a754-fe886e518a90, infoPort=38997, infoSecurePort=0, ipcPort=37931, storageInfo=lv=-57;cid=testClusterID;nsid=1143451536;c=1731785316016), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:28:50,007 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@358d2587{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/java.io.tmpdir/jetty-localhost-45793-hadoop-hdfs-3_4_1-tests_jar-_-any-13776404296214602831/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:28:50,007 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2e49578b{HTTP/1.1, (http/1.1)}{localhost:45793} 2024-11-16T19:28:50,007 INFO [Time-limited test {}] server.Server(415): Started @156661ms 2024-11-16T19:28:50,008 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-16T19:28:50,071 WARN [Thread-1360 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T19:28:50,073 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x490a37cf9377d2f3 with lease ID 0x495697860a96bfc5: from storage DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512 node DatanodeRegistration(127.0.0.1:43807, datanodeUuid=1fc598ff-773e-473c-adca-22e7b3ecc203, infoPort=41673, infoSecurePort=0, ipcPort=45571, storageInfo=lv=-57;cid=testClusterID;nsid=1143451536;c=1731785316016), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:28:50,073 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x490a37cf9377d2f3 with lease ID 0x495697860a96bfc5: from storage DS-20643be2-c0fe-4a7c-a141-edfa643c74c3 node DatanodeRegistration(127.0.0.1:43807, datanodeUuid=1fc598ff-773e-473c-adca-22e7b3ecc203, infoPort=41673, infoSecurePort=0, ipcPort=45571, storageInfo=lv=-57;cid=testClusterID;nsid=1143451536;c=1731785316016), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:28:50,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:28:50,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:51,026 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-16T19:28:51,032 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-16T19:28:51,035 ERROR [FSHLog-0-hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472-prefix:d11ab77873cb,44973,1731785316633 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37075,DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
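Editor's note: the "Validated row row1002" line (AbstractTestLogRolling) is the test confirming that a row it wrote earlier is still readable after the datanodes were restarted. A hedged sketch of that write-then-verify check with the plain HBase client API; the table, column family and qualifier names below are placeholders, not the ones the test actually uses.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ValidateRowSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder names; the real test defines its own table and family.
    TableName table = TableName.valueOf("testLogRolling");
    byte[] family = Bytes.toBytes("info");
    byte[] qualifier = Bytes.toBytes("q");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table t = conn.getTable(table)) {
      byte[] row = Bytes.toBytes("row1002");
      // The write goes through the region server's WAL (the FSHLog seen in this log).
      t.put(new Put(row).addColumn(family, qualifier, Bytes.toBytes("value")));
      // Reading it back and finding a non-empty result is what the log reports as
      // "Validated row row1002".
      Result r = t.get(new Get(row));
      if (r.isEmpty()) {
        throw new AssertionError("row1002 not found after datanode restart");
      }
    }
  }
}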
2024-11-16T19:28:51,035 WARN [FSHLog-0-hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472-prefix:d11ab77873cb,44973,1731785316633 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37075,DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:51,036 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d11ab77873cb%2C44973%2C1731785316633:(num 1731785317125) roll requested 2024-11-16T19:28:51,036 INFO [regionserver/d11ab77873cb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C44973%2C1731785316633.1731785331036 2024-11-16T19:28:51,043 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785317125 newFile=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785331036 2024-11-16T19:28:51,043 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:51,043 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:51,043 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:51,043 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:51,043 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:51,043 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785317125 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785331036 2024-11-16T19:28:51,044 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37075,DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
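Editor's note: the "roll requested" and "Rolled WAL ... with entries=2" lines show the log roller replacing the WAL writer after "append entry failed". In the test this roll is triggered internally, but the equivalent operation is exposed through the public Admin API; a minimal sketch, assuming a reachable cluster and using the server name exactly as it appears in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Host, port and start code taken from the log above: d11ab77873cb,44973,1731785316633.
      ServerName rs = ServerName.valueOf("d11ab77873cb", 44973, 1731785316633L);
      // Ask the region server to close its current WAL writer and open a new one,
      // the same operation the log roller performs here after the append failure.
      admin.rollWALWriter(rs);
    }
  }
}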
2024-11-16T19:28:51,044 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37075,DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:51,044 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785317125 2024-11-16T19:28:51,044 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41673:41673),(127.0.0.1/127.0.0.1:38997:38997)] 2024-11-16T19:28:51,044 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785317125 is not closed yet, will try archiving it next time 2024-11-16T19:28:51,044 WARN [IPC Server handler 4 on default port 34363 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785317125 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-16T19:28:51,045 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785317125 after 1ms 2024-11-16T19:28:51,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:51,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:28:51,946 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-16T19:28:52,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:52,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:53,050 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-16T19:28:53,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:53,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:54,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:54,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:55,046 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785317125 after 4002ms 2024-11-16T19:28:55,054 WARN [ResponseProcessor for block BP-856716490-172.17.0.2-1731785316016:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-856716490-172.17.0.2-1731785316016:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:55,055 WARN [DataStreamer for file /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785331036 block BP-856716490-172.17.0.2-1731785316016:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-856716490-172.17.0.2-1731785316016:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43807,DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512,DISK], DatanodeInfoWithStorage[127.0.0.1:39489,DS-d869cb24-74d8-4a91-af81-79377e1e695b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43807,DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512,DISK]) is bad. 
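Editor's note: the "Recover lease on dfs file", "Failed to recover lease, attempt=0 ... after 1ms" and "Recovered lease, attempt=1 ... after 4002ms" lines reflect the retry loop inside RecoverLeaseFSUtils. A simplified sketch of that pattern follows, using only the HDFS client calls named in the stack traces (recoverLease and isFileClosed); the pause length and retry limit are assumptions for illustration, not HBase's actual tuning.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {

  /** Returns true once the NameNode reports the file's lease released and the file closed. */
  public static boolean recoverLease(DistributedFileSystem dfs, Path wal, int maxAttempts)
      throws Exception {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      // Ask the NameNode to start (or re-check) lease recovery; true means already closed.
      if (dfs.recoverLease(wal)) {
        return true;
      }
      // Block recovery (the RecoveryId mentioned by the NameNode above) runs asynchronously
      // on the datanodes, so poll isFileClosed() for a while before the next recoverLease call.
      long deadline = System.currentTimeMillis() + 4000L; // assumed pause, roughly the 4002ms seen above
      while (System.currentTimeMillis() < deadline) {
        if (dfs.isFileClosed(wal)) {
          return true;
        }
        Thread.sleep(200L);
      }
    }
    return false;
  }
}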
2024-11-16T19:28:55,056 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1773942762_22 at /127.0.0.1:48348 [Receiving block BP-856716490-172.17.0.2-1731785316016:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:43807:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48348 dst: /127.0.0.1:43807 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:55,057 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1773942762_22 at /127.0.0.1:47152 [Receiving block BP-856716490-172.17.0.2-1731785316016:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:39489:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47152 dst: /127.0.0.1:39489 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:55,059 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@358d2587{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:28:55,060 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2e49578b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:28:55,060 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:28:55,060 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3237c5f1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:28:55,061 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@176f5faa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/hadoop.log.dir/,STOPPED} 2024-11-16T19:28:55,062 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T19:28:55,062 WARN [BP-856716490-172.17.0.2-1731785316016 heartbeating to localhost/127.0.0.1:34363 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T19:28:55,062 WARN [BP-856716490-172.17.0.2-1731785316016 heartbeating to localhost/127.0.0.1:34363 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-856716490-172.17.0.2-1731785316016 (Datanode Uuid 1fc598ff-773e-473c-adca-22e7b3ecc203) service to localhost/127.0.0.1:34363 2024-11-16T19:28:55,062 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T19:28:55,063 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/cluster_032fc1cc-3d9e-2961-e04e-0405007a6b3d/data/data1/current/BP-856716490-172.17.0.2-1731785316016 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:28:55,063 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/cluster_032fc1cc-3d9e-2961-e04e-0405007a6b3d/data/data2/current/BP-856716490-172.17.0.2-1731785316016 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:28:55,064 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T19:28:55,070 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:28:55,072 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:28:55,073 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:28:55,073 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:28:55,073 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T19:28:55,073 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@718ea2f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:28:55,074 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@167fd01b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:28:55,166 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ca8564b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/java.io.tmpdir/jetty-localhost-45189-hadoop-hdfs-3_4_1-tests_jar-_-any-1425657241802039518/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:28:55,167 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@fa662a{HTTP/1.1, (http/1.1)}{localhost:45189} 2024-11-16T19:28:55,167 INFO [Time-limited test {}] server.Server(415): Started @161821ms 2024-11-16T19:28:55,168 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T19:28:55,189 WARN [ResponseProcessor for block BP-856716490-172.17.0.2-1731785316016:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-856716490-172.17.0.2-1731785316016:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:55,189 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1773942762_22 at /127.0.0.1:47176 [Receiving block BP-856716490-172.17.0.2-1731785316016:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:39489:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47176 dst: /127.0.0.1:39489 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:55,193 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6a4031cc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:28:55,193 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@126bd190{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:28:55,193 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:28:55,193 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c07ac8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:28:55,193 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5960bf29{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/hadoop.log.dir/,STOPPED} 2024-11-16T19:28:55,198 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T19:28:55,198 WARN [BP-856716490-172.17.0.2-1731785316016 heartbeating to localhost/127.0.0.1:34363 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T19:28:55,198 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T19:28:55,198 WARN [BP-856716490-172.17.0.2-1731785316016 heartbeating to localhost/127.0.0.1:34363 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-856716490-172.17.0.2-1731785316016 (Datanode Uuid 4bc3b464-663d-4517-a754-fe886e518a90) service to localhost/127.0.0.1:34363 2024-11-16T19:28:55,198 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/cluster_032fc1cc-3d9e-2961-e04e-0405007a6b3d/data/data3/current/BP-856716490-172.17.0.2-1731785316016 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:28:55,198 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/cluster_032fc1cc-3d9e-2961-e04e-0405007a6b3d/data/data4/current/BP-856716490-172.17.0.2-1731785316016 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:28:55,199 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T19:28:55,206 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:28:55,209 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:28:55,210 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:28:55,210 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:28:55,210 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T19:28:55,210 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@760c54cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:28:55,211 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2026736d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:28:55,275 WARN [Thread-1403 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T19:28:55,277 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc7458e4c1eaa3627 with lease ID 0x495697860a96bfc6: from storage DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512 node DatanodeRegistration(127.0.0.1:42329, datanodeUuid=1fc598ff-773e-473c-adca-22e7b3ecc203, infoPort=41331, infoSecurePort=0, ipcPort=42921, storageInfo=lv=-57;cid=testClusterID;nsid=1143451536;c=1731785316016), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:28:55,277 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc7458e4c1eaa3627 with lease ID 0x495697860a96bfc6: from storage DS-20643be2-c0fe-4a7c-a141-edfa643c74c3 node DatanodeRegistration(127.0.0.1:42329, datanodeUuid=1fc598ff-773e-473c-adca-22e7b3ecc203, infoPort=41331, infoSecurePort=0, ipcPort=42921, storageInfo=lv=-57;cid=testClusterID;nsid=1143451536;c=1731785316016), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:28:55,329 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@528eeea6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/java.io.tmpdir/jetty-localhost-45151-hadoop-hdfs-3_4_1-tests_jar-_-any-2469734716337722973/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:28:55,329 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20466b6{HTTP/1.1, (http/1.1)}{localhost:45151} 2024-11-16T19:28:55,329 INFO [Time-limited test {}] server.Server(415): Started @161984ms 2024-11-16T19:28:55,331 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T19:28:55,395 WARN [Thread-1434 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T19:28:55,397 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4d6871b54ffe3b7b with lease ID 0x495697860a96bfc7: from storage DS-d869cb24-74d8-4a91-af81-79377e1e695b node DatanodeRegistration(127.0.0.1:45445, datanodeUuid=4bc3b464-663d-4517-a754-fe886e518a90, infoPort=40135, infoSecurePort=0, ipcPort=46769, storageInfo=lv=-57;cid=testClusterID;nsid=1143451536;c=1731785316016), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:28:55,397 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4d6871b54ffe3b7b with lease ID 0x495697860a96bfc7: from storage DS-f499ef4e-ef96-4d43-84e6-8944543751be node DatanodeRegistration(127.0.0.1:45445, datanodeUuid=4bc3b464-663d-4517-a754-fe886e518a90, infoPort=40135, infoSecurePort=0, ipcPort=46769, storageInfo=lv=-57;cid=testClusterID;nsid=1143451536;c=1731785316016), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:28:55,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:28:55,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:56,348 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-16T19:28:56,352 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-16T19:28:56,355 ERROR [FSHLog-0-hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472-prefix:d11ab77873cb,44973,1731785316633 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39489,DS-d869cb24-74d8-4a91-af81-79377e1e695b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T19:28:56,355 WARN [FSHLog-0-hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472-prefix:d11ab77873cb,44973,1731785316633 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39489,DS-d869cb24-74d8-4a91-af81-79377e1e695b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:56,356 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d11ab77873cb%2C44973%2C1731785316633:(num 1731785331036) roll requested 2024-11-16T19:28:56,356 INFO [regionserver/d11ab77873cb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C44973%2C1731785316633.1731785336356 2024-11-16T19:28:56,367 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785331036 newFile=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785336356 2024-11-16T19:28:56,367 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:56,367 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:56,368 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:56,368 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:56,368 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:56,368 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785331036 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785336356 2024-11-16T19:28:56,368 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39489,DS-d869cb24-74d8-4a91-af81-79377e1e695b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T19:28:56,368 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39489,DS-d869cb24-74d8-4a91-af81-79377e1e695b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:56,368 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785331036 2024-11-16T19:28:56,369 WARN [IPC Server handler 3 on default port 34363 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785331036 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-16T19:28:56,369 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785331036 after 1ms 2024-11-16T19:28:56,369 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40135:40135),(127.0.0.1/127.0.0.1:41331:41331)] 2024-11-16T19:28:56,369 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785331036 is not closed yet, will try archiving it next time 2024-11-16T19:28:56,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:56,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:28:57,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:57,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:58,371 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C44973%2C1731785316633.1731785338371 2024-11-16T19:28:58,383 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785336356 newFile=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785338371 2024-11-16T19:28:58,383 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:58,383 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:58,384 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:58,384 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:58,384 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:28:58,384 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785336356 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785338371 2024-11-16T19:28:58,386 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41331:41331),(127.0.0.1/127.0.0.1:40135:40135)] 2024-11-16T19:28:58,386 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785331036 is not closed yet, will try archiving it next time 2024-11-16T19:28:58,386 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785336356 is not closed yet, will try archiving it next time 2024-11-16T19:28:58,386 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for 
hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785317125 2024-11-16T19:28:58,386 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785317125 2024-11-16T19:28:58,387 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785317125 after 1ms 2024-11-16T19:28:58,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42329 is added to blk_1073741838_1019 (size=1264) 2024-11-16T19:28:58,387 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785317125 2024-11-16T19:28:58,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45445 is added to blk_1073741838_1019 (size=1264) 2024-11-16T19:28:58,388 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785331036 is not closed yet, will try archiving it next time 2024-11-16T19:28:58,398 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731785318455/Put/vlen=218/seqid=0] 2024-11-16T19:28:58,399 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731785327744/Put/vlen=1045/seqid=0] 2024-11-16T19:28:58,399 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785317125 2024-11-16T19:28:58,399 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785331036 2024-11-16T19:28:58,399 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785331036 2024-11-16T19:28:58,399 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785331036 after 0ms 2024-11-16T19:28:58,399 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785331036 2024-11-16T19:28:58,403 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731785331035/Put/vlen=1045/seqid=0] 2024-11-16T19:28:58,403 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731785333051/Put/vlen=1045/seqid=0] 
2024-11-16T19:28:58,403 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785331036 2024-11-16T19:28:58,403 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785336356 2024-11-16T19:28:58,403 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785336356 2024-11-16T19:28:58,403 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785336356 after 0ms 2024-11-16T19:28:58,404 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785336356 2024-11-16T19:28:58,406 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731785336355/Put/vlen=1045/seqid=0] 2024-11-16T19:28:58,406 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785338371 2024-11-16T19:28:58,406 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785338371 2024-11-16T19:28:58,407 WARN [IPC Server handler 0 on default port 34363 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785338371 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-16T19:28:58,407 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785338371 after 1ms 2024-11-16T19:28:58,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:58,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:59,408 WARN [ResponseProcessor for block BP-856716490-172.17.0.2-1731785316016:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-856716490-172.17.0.2-1731785316016:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:28:59,408 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1167945216_22 at /127.0.0.1:60358 [Receiving block BP-856716490-172.17.0.2-1731785316016:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:42329:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60358 dst: /127.0.0.1:42329 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:42329 remote=/127.0.0.1:60358]. Total timeout mills is 60000, 58975 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:59,408 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1167945216_22 at /127.0.0.1:54656 [Receiving block BP-856716490-172.17.0.2-1731785316016:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:45445:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54656 dst: /127.0.0.1:45445 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:28:59,408 WARN [DataStreamer for file /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785338371 block BP-856716490-172.17.0.2-1731785316016:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-856716490-172.17.0.2-1731785316016:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42329,DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512,DISK], DatanodeInfoWithStorage[127.0.0.1:45445,DS-d869cb24-74d8-4a91-af81-79377e1e695b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42329,DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512,DISK]) is bad. 
2024-11-16T19:28:59,412 WARN [DataStreamer for file /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785338371 block BP-856716490-172.17.0.2-1731785316016:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-856716490-172.17.0.2-1731785316016:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T19:28:59,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42329 is added to blk_1073741839_1022 (size=85) 2024-11-16T19:28:59,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:28:59,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:00,276 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-16T19:29:00,370 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785331036 after 4002ms 2024-11-16T19:29:00,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:00,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:01,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:01,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:29:02,408 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785338371 after 4002ms 2024-11-16T19:29:02,408 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785338371 2024-11-16T19:29:02,416 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785338371 2024-11-16T19:29:02,416 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 708895c41838008cb4f83bd2c991c364 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-16T19:29:02,417 ERROR [FSHLog-0-hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472-prefix:d11ab77873cb,44973,1731785316633 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-856716490-172.17.0.2-1731785316016:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:29:02,417 WARN [FSHLog-0-hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472-prefix:d11ab77873cb,44973,1731785316633 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-856716490-172.17.0.2-1731785316016:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:29:02,418 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d11ab77873cb%2C44973%2C1731785316633:(num 1731785338371) roll requested 2024-11-16T19:29:02,418 INFO [regionserver/d11ab77873cb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C44973%2C1731785316633.1731785342418 2024-11-16T19:29:02,423 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785338371 newFile=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785342418 2024-11-16T19:29:02,423 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:02,423 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:02,423 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:02,424 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:02,424 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:02,424 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785338371 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785342418 2024-11-16T19:29:02,424 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-856716490-172.17.0.2-1731785316016:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:29:02,424 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-856716490-172.17.0.2-1731785316016:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
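[Editor's note] The entries that follow show the close-WAL path falling back to HDFS lease recovery on the old writer file. As a rough illustration of that general pattern only (this is not HBase's RecoverLeaseFSUtils; the poll interval, timeout, and paths below are made up), a client can trigger lease recovery on the NameNode and poll for the file to close like this:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Simplified sketch of HDFS lease recovery with polling. Illustration of
    // the general pattern only; retry cadence and timeout are arbitrary and
    // do not reflect HBase's RecoverLeaseFSUtils.
    public final class LeaseRecoverySketch {
      public static boolean recoverLease(DistributedFileSystem dfs, Path walFile, long timeoutMs)
          throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        // Ask the NameNode to begin lease recovery; returns true if the file
        // is already closed (lease released).
        boolean recovered = dfs.recoverLease(walFile);
        while (!recovered && System.currentTimeMillis() < deadline) {
          Thread.sleep(1000L); // arbitrary poll interval
          // isFileClosed() is the same check the traces above show failing
          // with "Filesystem closed" once the DFSClient has been shut down.
          recovered = dfs.isFileClosed(walFile);
        }
        return recovered;
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical NameNode address and WAL path, for illustration only.
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:34363"), conf);
        Path wal = new Path("/user/jenkins/some-wal-file");
        System.out.println("recovered=" + recoverLease(dfs, wal, 60_000L));
      }
    }

[End editor's note]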
2024-11-16T19:29:02,425 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785338371 2024-11-16T19:29:02,425 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785338371 after 0ms 2024-11-16T19:29:02,427 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.1731785338371 to hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/oldWALs/d11ab77873cb%2C44973%2C1731785316633.1731785338371 2024-11-16T19:29:02,428 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40135:40135),(127.0.0.1/127.0.0.1:41331:41331)] 2024-11-16T19:29:02,442 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/default/TestLogRolling-testLogRollOnPipelineRestart/708895c41838008cb4f83bd2c991c364/.tmp/info/d9aceac6a71f4dca862014ed167e8a26 is 1080, key is row1002/info:/1731785327744/Put/seqid=0 2024-11-16T19:29:02,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45445 is added to blk_1073741841_1024 (size=9270) 2024-11-16T19:29:02,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42329 is added to blk_1073741841_1024 (size=9270) 2024-11-16T19:29:02,448 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/default/TestLogRolling-testLogRollOnPipelineRestart/708895c41838008cb4f83bd2c991c364/.tmp/info/d9aceac6a71f4dca862014ed167e8a26 2024-11-16T19:29:02,454 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/default/TestLogRolling-testLogRollOnPipelineRestart/708895c41838008cb4f83bd2c991c364/.tmp/info/d9aceac6a71f4dca862014ed167e8a26 as hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/default/TestLogRolling-testLogRollOnPipelineRestart/708895c41838008cb4f83bd2c991c364/info/d9aceac6a71f4dca862014ed167e8a26 2024-11-16T19:29:02,460 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/default/TestLogRolling-testLogRollOnPipelineRestart/708895c41838008cb4f83bd2c991c364/info/d9aceac6a71f4dca862014ed167e8a26, entries=4, sequenceid=8, filesize=9.1 K 2024-11-16T19:29:02,461 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 708895c41838008cb4f83bd2c991c364 in 45ms, sequenceid=8, compaction requested=false 2024-11-16T19:29:02,461 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 
708895c41838008cb4f83bd2c991c364: 2024-11-16T19:29:02,461 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-16T19:29:02,462 ERROR [FSHLog-0-hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472-prefix:d11ab77873cb,44973,1731785316633.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37075,DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:29:02,462 WARN [FSHLog-0-hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472-prefix:d11ab77873cb,44973,1731785316633.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37075,DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:29:02,462 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog d11ab77873cb%2C44973%2C1731785316633.meta:.meta(num 1731785317511) roll requested 2024-11-16T19:29:02,462 INFO [regionserver/d11ab77873cb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C44973%2C1731785316633.meta.1731785342462.meta 2024-11-16T19:29:02,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:02,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:02,469 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:02,469 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:02,470 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:02,470 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:02,470 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:02,470 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.meta.1731785317511.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.meta.1731785342462.meta 2024-11-16T19:29:02,472 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37075,DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:29:02,472 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37075,DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
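[Editor's note] The recurring "java.io.IOException: Filesystem closed" in the traces above is the usual symptom of Hadoop's FileSystem cache: once one caller closes the cached DistributedFileSystem instance, every other holder of that same instance starts failing. A hedged sketch of the two common workarounds (the URI is a placeholder):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    // Two common ways to avoid "Filesystem closed" when one component closes
    // a FileSystem that others still use. Sketch only; URI is a placeholder.
    public final class FsCacheSketch {
      public static void main(String[] args) throws Exception {
        URI hdfs = URI.create("hdfs://localhost:34363");

        // Option 1: disable the cache so each FileSystem.get() returns a
        // fresh instance that is safe to close independently.
        Configuration noCache = new Configuration();
        noCache.setBoolean("fs.hdfs.impl.disable.cache", true);
        try (FileSystem privateFs = FileSystem.get(hdfs, noCache)) {
          System.out.println("uncached instance: " + privateFs);
        }

        // Option 2: keep the cache but take a private instance explicitly.
        Configuration conf = new Configuration();
        try (FileSystem privateFs = FileSystem.newInstance(hdfs, conf)) {
          System.out.println("newInstance: " + privateFs);
        }
      }
    }

[End editor's note]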
2024-11-16T19:29:02,472 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.meta.1731785317511.meta 2024-11-16T19:29:02,473 WARN [IPC Server handler 0 on default port 34363 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.meta.1731785317511.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1015 2024-11-16T19:29:02,473 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41331:41331),(127.0.0.1/127.0.0.1:40135:40135)] 2024-11-16T19:29:02,473 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.meta.1731785317511.meta after 1ms 2024-11-16T19:29:02,473 DEBUG [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.meta.1731785317511.meta is not closed yet, will try archiving it next time 2024-11-16T19:29:02,487 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/hbase/meta/1588230740/.tmp/info/bb0a9527a0774bfd917cb5dd63438421 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364./info:regioninfo/1731785318460/Put/seqid=0 2024-11-16T19:29:02,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45445 is added to blk_1073741843_1027 (size=7125) 2024-11-16T19:29:02,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42329 is added to blk_1073741843_1027 (size=7125) 2024-11-16T19:29:02,492 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/hbase/meta/1588230740/.tmp/info/bb0a9527a0774bfd917cb5dd63438421 2024-11-16T19:29:02,511 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/hbase/meta/1588230740/.tmp/ns/e8fc349e2ce644ef84b5fc67040a66e3 is 43, key is default/ns:d/1731785317551/Put/seqid=0 2024-11-16T19:29:02,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45445 is added to blk_1073741844_1028 (size=5153) 2024-11-16T19:29:02,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42329 is added to blk_1073741844_1028 (size=5153) 2024-11-16T19:29:02,516 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/hbase/meta/1588230740/.tmp/ns/e8fc349e2ce644ef84b5fc67040a66e3 2024-11-16T19:29:02,534 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/hbase/meta/1588230740/.tmp/table/14be91212afa4651a7e9b1aafbcaa32e is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731785318470/Put/seqid=0 2024-11-16T19:29:02,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42329 is added to blk_1073741845_1029 (size=5438) 2024-11-16T19:29:02,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45445 is added to blk_1073741845_1029 (size=5438) 2024-11-16T19:29:02,539 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/hbase/meta/1588230740/.tmp/table/14be91212afa4651a7e9b1aafbcaa32e 2024-11-16T19:29:02,545 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/hbase/meta/1588230740/.tmp/info/bb0a9527a0774bfd917cb5dd63438421 as hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/hbase/meta/1588230740/info/bb0a9527a0774bfd917cb5dd63438421 2024-11-16T19:29:02,551 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/hbase/meta/1588230740/info/bb0a9527a0774bfd917cb5dd63438421, entries=10, sequenceid=11, filesize=7.0 K 2024-11-16T19:29:02,552 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/hbase/meta/1588230740/.tmp/ns/e8fc349e2ce644ef84b5fc67040a66e3 as hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/hbase/meta/1588230740/ns/e8fc349e2ce644ef84b5fc67040a66e3 2024-11-16T19:29:02,558 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/hbase/meta/1588230740/ns/e8fc349e2ce644ef84b5fc67040a66e3, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T19:29:02,559 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/hbase/meta/1588230740/.tmp/table/14be91212afa4651a7e9b1aafbcaa32e as hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/hbase/meta/1588230740/table/14be91212afa4651a7e9b1aafbcaa32e 2024-11-16T19:29:02,565 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/hbase/meta/1588230740/table/14be91212afa4651a7e9b1aafbcaa32e, entries=2, sequenceid=11, filesize=5.3 K 2024-11-16T19:29:02,566 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 105ms, sequenceid=11, compaction requested=false 2024-11-16T19:29:02,566 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-16T19:29:02,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T19:29:02,571 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T19:29:02,571 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T19:29:02,571 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:29:02,572 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:29:02,572 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T19:29:02,572 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T19:29:02,572 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1963214345, stopped=false 2024-11-16T19:29:02,572 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d11ab77873cb,45925,1731785316592 2024-11-16T19:29:02,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44973-0x1004a02354c0001, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T19:29:02,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T19:29:02,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44973-0x1004a02354c0001, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:29:02,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:29:02,573 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T19:29:02,574 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44973-0x1004a02354c0001, quorum=127.0.0.1:61191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:29:02,574 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T19:29:02,574 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:29:02,574 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T19:29:02,574 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:29:02,574 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd11ab77873cb,44973,1731785316633' ***** 2024-11-16T19:29:02,574 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T19:29:02,574 INFO [RS:0;d11ab77873cb:44973 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T19:29:02,574 INFO [RS:0;d11ab77873cb:44973 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T19:29:02,574 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T19:29:02,574 INFO [RS:0;d11ab77873cb:44973 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T19:29:02,574 INFO [RS:0;d11ab77873cb:44973 {}] regionserver.HRegionServer(3091): Received CLOSE for 708895c41838008cb4f83bd2c991c364 2024-11-16T19:29:02,575 INFO [RS:0;d11ab77873cb:44973 {}] regionserver.HRegionServer(959): stopping server d11ab77873cb,44973,1731785316633 2024-11-16T19:29:02,575 INFO [RS:0;d11ab77873cb:44973 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T19:29:02,575 INFO [RS:0;d11ab77873cb:44973 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d11ab77873cb:44973. 2024-11-16T19:29:02,575 DEBUG [RS:0;d11ab77873cb:44973 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T19:29:02,575 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 708895c41838008cb4f83bd2c991c364, disabling compactions & flushes 2024-11-16T19:29:02,575 DEBUG [RS:0;d11ab77873cb:44973 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:29:02,575 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364. 
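Several entries above step through a close protocol: "Closing ..., disabling compactions & flushes", "Time limited wait for close lock", "Acquired close lock ... after waiting 0 ms", "Updates disabled". A hedged sketch of the underlying pattern, an exclusive close lock acquired with a bounded wait before writes are disabled, follows; names and the timeout are invented and this is not HRegion's real implementation.

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Sketch of a bounded wait for an exclusive "close lock" before disabling
    // writes, loosely mirroring the "Time limited wait for close lock" entries.
    public final class CloseLockExample {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      // Mutations take the read lock so many can run concurrently.
      public void withWriteAccess(Runnable mutation) {
        lock.readLock().lock();
        try {
          mutation.run();
        } finally {
          lock.readLock().unlock();
        }
      }

      // close() takes the write lock, but only waits a bounded amount of time.
      public boolean tryClose(long timeoutMillis) throws InterruptedException {
        long start = System.currentTimeMillis();
        if (!lock.writeLock().tryLock(timeoutMillis, TimeUnit.MILLISECONDS)) {
          return false;                                  // give up; stays open
        }
        long waited = System.currentTimeMillis() - start;
        System.out.println("Acquired close lock after waiting " + waited + " ms");
        // ... flush / write the close marker here, then keep the lock: closed.
        return true;
      }
    }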
2024-11-16T19:29:02,575 INFO [RS:0;d11ab77873cb:44973 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T19:29:02,575 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364. 2024-11-16T19:29:02,575 INFO [RS:0;d11ab77873cb:44973 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T19:29:02,575 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364. after waiting 0 ms 2024-11-16T19:29:02,575 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364. 2024-11-16T19:29:02,575 INFO [RS:0;d11ab77873cb:44973 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T19:29:02,575 INFO [RS:0;d11ab77873cb:44973 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T19:29:02,575 INFO [RS:0;d11ab77873cb:44973 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T19:29:02,575 DEBUG [RS:0;d11ab77873cb:44973 {}] regionserver.HRegionServer(1325): Online Regions={708895c41838008cb4f83bd2c991c364=TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364., 1588230740=hbase:meta,,1.1588230740} 2024-11-16T19:29:02,575 DEBUG [RS:0;d11ab77873cb:44973 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 708895c41838008cb4f83bd2c991c364 2024-11-16T19:29:02,575 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T19:29:02,575 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T19:29:02,575 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T19:29:02,575 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T19:29:02,575 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T19:29:02,579 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/default/TestLogRolling-testLogRollOnPipelineRestart/708895c41838008cb4f83bd2c991c364/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-16T19:29:02,579 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T19:29:02,579 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364. 2024-11-16T19:29:02,580 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T19:29:02,580 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 708895c41838008cb4f83bd2c991c364: Waiting for close lock at 1731785342575Running coprocessor pre-close hooks at 1731785342575Disabling compacts and flushes for region at 1731785342575Disabling writes for close at 1731785342575Writing region close event to WAL at 1731785342575Running coprocessor post-close hooks at 1731785342579 (+4 ms)Closed at 1731785342579 2024-11-16T19:29:02,580 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T19:29:02,580 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731785342575Running coprocessor pre-close hooks at 1731785342575Disabling compacts and flushes for region at 1731785342575Disabling writes for close at 1731785342575Writing region close event to WAL at 1731785342576 (+1 ms)Running coprocessor post-close hooks at 1731785342579 (+3 ms)Closed at 1731785342580 (+1 ms) 2024-11-16T19:29:02,580 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731785317681.708895c41838008cb4f83bd2c991c364. 2024-11-16T19:29:02,580 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T19:29:02,775 INFO [RS:0;d11ab77873cb:44973 {}] regionserver.HRegionServer(976): stopping server d11ab77873cb,44973,1731785316633; all regions closed. 
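The "Region close journal" entries above record each named step with its absolute timestamp and a "(+N ms)" delta from the previous step. A small illustrative journal in that style is sketched below; it is not the monitoring machinery HBase actually uses, just a reconstruction of the output format.

    import java.util.ArrayList;
    import java.util.List;

    // Minimal step journal that prints absolute timestamps plus "(+N ms)"
    // deltas, in the style of the "Region close journal" lines above.
    public final class StepJournal {
      private record Step(String name, long at) {}
      private final List<Step> steps = new ArrayList<>();

      public void mark(String name) {
        steps.add(new Step(name, System.currentTimeMillis()));
      }

      @Override
      public String toString() {
        StringBuilder sb = new StringBuilder();
        long prev = -1;
        for (Step s : steps) {
          sb.append(s.name()).append(" at ").append(s.at());
          if (prev >= 0 && s.at() > prev) {
            sb.append(" (+").append(s.at() - prev).append(" ms)");
          }
          prev = s.at();       // steps with the same timestamp print no delta
        }
        return sb.toString();
      }
    }

Usage would be a series of calls such as mark("Waiting for close lock"), mark("Running coprocessor post-close hooks"), with the whole journal logged once at the end, which matches how a single journal line appears above.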
2024-11-16T19:29:02,776 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:02,776 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:02,777 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:02,777 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:02,777 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:02,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45445 is added to blk_1073741842_1025 (size=825) 2024-11-16T19:29:02,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42329 is added to blk_1073741842_1025 (size=825) 2024-11-16T19:29:02,990 INFO [regionserver/d11ab77873cb:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T19:29:02,990 INFO [regionserver/d11ab77873cb:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T19:29:02,991 INFO [regionserver/d11ab77873cb:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T19:29:03,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:29:03,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:04,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:04,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:29:05,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:05,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:06,399 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1015: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-16T19:29:06,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:06,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:06,474 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.meta.1731785317511.meta after 4002ms 2024-11-16T19:29:06,475 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/WALs/d11ab77873cb,44973,1731785316633/d11ab77873cb%2C44973%2C1731785316633.meta.1731785317511.meta to hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/oldWALs/d11ab77873cb%2C44973%2C1731785316633.meta.1731785317511.meta 2024-11-16T19:29:06,482 DEBUG [RS:0;d11ab77873cb:44973 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/oldWALs 2024-11-16T19:29:06,482 INFO [RS:0;d11ab77873cb:44973 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d11ab77873cb%2C44973%2C1731785316633.meta:.meta(num 1731785342462) 2024-11-16T19:29:06,483 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:06,483 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:06,483 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:06,483 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:06,484 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:06,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42329 is added to blk_1073741840_1023 (size=1162) 2024-11-16T19:29:06,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45445 is added to blk_1073741840_1023 (size=1162) 2024-11-16T19:29:06,494 DEBUG [RS:0;d11ab77873cb:44973 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/oldWALs 2024-11-16T19:29:06,494 INFO [RS:0;d11ab77873cb:44973 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d11ab77873cb%2C44973%2C1731785316633:(num 1731785342418) 2024-11-16T19:29:06,494 DEBUG [RS:0;d11ab77873cb:44973 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:29:06,494 INFO [RS:0;d11ab77873cb:44973 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T19:29:06,494 INFO [RS:0;d11ab77873cb:44973 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T19:29:06,495 INFO [RS:0;d11ab77873cb:44973 {}] hbase.ChoreService(370): Chore service for: regionserver/d11ab77873cb:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T19:29:06,495 INFO [RS:0;d11ab77873cb:44973 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T19:29:06,495 INFO 
[regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T19:29:06,495 INFO [RS:0;d11ab77873cb:44973 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44973 2024-11-16T19:29:06,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44973-0x1004a02354c0001, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d11ab77873cb,44973,1731785316633 2024-11-16T19:29:06,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T19:29:06,496 INFO [RS:0;d11ab77873cb:44973 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T19:29:06,497 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d11ab77873cb,44973,1731785316633] 2024-11-16T19:29:06,497 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d11ab77873cb,44973,1731785316633 already deleted, retry=false 2024-11-16T19:29:06,498 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d11ab77873cb,44973,1731785316633 expired; onlineServers=0 2024-11-16T19:29:06,498 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd11ab77873cb,45925,1731785316592' ***** 2024-11-16T19:29:06,498 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T19:29:06,498 INFO [M:0;d11ab77873cb:45925 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T19:29:06,498 INFO [M:0;d11ab77873cb:45925 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T19:29:06,498 DEBUG [M:0;d11ab77873cb:45925 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T19:29:06,498 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
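The repeated "Failed invocation ... Filesystem closed" warnings earlier, followed by "Recovered lease, attempt=1 ... after 4002ms", come from a retry loop around HDFS lease recovery on old WAL files. A simplified sketch of such a loop is below; it calls the public DistributedFileSystem API directly instead of the reflective path the real RecoverLeaseFSUtils uses (hence the InvocationTargetException wrappers in the log), and the timeout parameter is an assumption.

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Hedged sketch: ask the NameNode to start lease recovery, then poll
    // isFileClosed() roughly once a second until it succeeds or a deadline
    // passes. Not the actual RecoverLeaseFSUtils implementation.
    public final class LeaseRecoveryExample {
      public static boolean recover(DistributedFileSystem dfs, Path walFile, long timeoutMs)
          throws IOException, InterruptedException {
        long start = System.currentTimeMillis();
        boolean recovered = dfs.recoverLease(walFile);   // true if already closed
        int attempt = 0;
        while (!recovered && System.currentTimeMillis() - start < timeoutMs) {
          Thread.sleep(1000L);                           // pause between probes
          attempt++;
          try {
            recovered = dfs.isFileClosed(walFile);
          } catch (IOException e) {
            // e.g. "Filesystem closed" when the DFSClient has already been
            // shut down; the real code logs a WARN and keeps retrying.
            System.err.println("Failed invocation for " + walFile + ": " + e);
          }
        }
        if (recovered) {
          System.out.println("Recovered lease, attempt=" + attempt + " on file="
              + walFile + " after " + (System.currentTimeMillis() - start) + "ms");
        }
        return recovered;
      }
    }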
2024-11-16T19:29:06,498 DEBUG [M:0;d11ab77873cb:45925 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T19:29:06,498 DEBUG [master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.small.0-1731785316894 {}] cleaner.HFileCleaner(306): Exit Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.small.0-1731785316894,5,FailOnTimeoutGroup] 2024-11-16T19:29:06,498 DEBUG [master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.large.0-1731785316894 {}] cleaner.HFileCleaner(306): Exit Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.large.0-1731785316894,5,FailOnTimeoutGroup] 2024-11-16T19:29:06,498 INFO [M:0;d11ab77873cb:45925 {}] hbase.ChoreService(370): Chore service for: master/d11ab77873cb:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T19:29:06,499 INFO [M:0;d11ab77873cb:45925 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T19:29:06,499 DEBUG [M:0;d11ab77873cb:45925 {}] master.HMaster(1795): Stopping service threads 2024-11-16T19:29:06,499 INFO [M:0;d11ab77873cb:45925 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T19:29:06,499 INFO [M:0;d11ab77873cb:45925 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T19:29:06,499 INFO [M:0;d11ab77873cb:45925 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T19:29:06,499 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T19:29:06,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T19:29:06,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:29:06,499 DEBUG [M:0;d11ab77873cb:45925 {}] zookeeper.ZKUtil(347): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T19:29:06,499 WARN [M:0;d11ab77873cb:45925 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T19:29:06,500 INFO [M:0;d11ab77873cb:45925 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/.lastflushedseqids 2024-11-16T19:29:06,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45445 is added to blk_1073741846_1030 (size=130) 2024-11-16T19:29:06,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42329 is added to blk_1073741846_1030 (size=130) 2024-11-16T19:29:06,505 INFO [M:0;d11ab77873cb:45925 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T19:29:06,505 INFO [M:0;d11ab77873cb:45925 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T19:29:06,506 DEBUG [M:0;d11ab77873cb:45925 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T19:29:06,506 INFO [M:0;d11ab77873cb:45925 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:29:06,506 DEBUG [M:0;d11ab77873cb:45925 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:29:06,506 DEBUG [M:0;d11ab77873cb:45925 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T19:29:06,506 DEBUG [M:0;d11ab77873cb:45925 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:29:06,506 INFO [M:0;d11ab77873cb:45925 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB 2024-11-16T19:29:06,506 ERROR [FSHLog-0-hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData-prefix:d11ab77873cb,45925,1731785316592 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37075,DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:29:06,506 WARN [FSHLog-0-hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData-prefix:d11ab77873cb,45925,1731785316592 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37075,DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T19:29:06,507 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog d11ab77873cb%2C45925%2C1731785316592:(num 1731785316825) roll requested 2024-11-16T19:29:06,507 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C45925%2C1731785316592.1731785346507 2024-11-16T19:29:06,511 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:06,512 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:06,512 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:06,512 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:06,512 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:06,512 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/WALs/d11ab77873cb,45925,1731785316592/d11ab77873cb%2C45925%2C1731785316592.1731785316825 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/WALs/d11ab77873cb,45925,1731785316592/d11ab77873cb%2C45925%2C1731785316592.1731785346507 2024-11-16T19:29:06,512 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37075,DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T19:29:06,513 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37075,DS-0a4a2b99-55d9-49fb-abfe-e0a0b763b512,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
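The sequence above, appendAndSync failing with "All datanodes ... are bad", a roll being requested, the old writer closed best-effort (its trailer write also fails), and a new WAL file taking over, follows a generic "roll on append failure" pattern. The sketch below shows that pattern against the local filesystem only; it is not AbstractFSWAL/FSHLog code and every name in it is invented.

    import java.io.IOException;
    import java.io.OutputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;

    // Generic sketch of "roll on append failure": when a write to the current
    // log file fails, close it best-effort and switch to a freshly created file.
    public final class RollingLogWriter {
      private final Path dir;
      private OutputStream current;
      private Path currentFile;

      public RollingLogWriter(Path dir) throws IOException {
        this.dir = dir;
        roll();
      }

      public void append(byte[] entry) throws IOException {
        try {
          current.write(entry);
          current.flush();
        } catch (IOException e) {
          System.err.println("append entry failed, rolling: " + e);
          roll();                      // old writer closed best-effort inside
          current.write(entry);        // retry once against the new file
          current.flush();
        }
      }

      private void roll() throws IOException {
        if (current != null) {
          try {
            current.close();           // may fail (bad pipeline); non-fatal
          } catch (IOException e) {
            System.err.println("close old writer failed: " + e);
          }
        }
        currentFile = dir.resolve("wal." + System.currentTimeMillis());
        current = Files.newOutputStream(currentFile);
        System.out.println("Rolled WAL, new file " + currentFile);
      }
    }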
2024-11-16T19:29:06,513 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/WALs/d11ab77873cb,45925,1731785316592/d11ab77873cb%2C45925%2C1731785316592.1731785316825 2024-11-16T19:29:06,513 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41331:41331),(127.0.0.1/127.0.0.1:40135:40135)] 2024-11-16T19:29:06,513 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/WALs/d11ab77873cb,45925,1731785316592/d11ab77873cb%2C45925%2C1731785316592.1731785316825 is not closed yet, will try archiving it next time 2024-11-16T19:29:06,513 WARN [IPC Server handler 4 on default port 34363 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/WALs/d11ab77873cb,45925,1731785316592/d11ab77873cb%2C45925%2C1731785316592.1731785316825 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-16T19:29:06,513 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/WALs/d11ab77873cb,45925,1731785316592/d11ab77873cb%2C45925%2C1731785316592.1731785316825 after 0ms 2024-11-16T19:29:06,527 DEBUG [M:0;d11ab77873cb:45925 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4201753c2005486cbf91268537979dc4 is 82, key is hbase:meta,,1/info:regioninfo/1731785317538/Put/seqid=0 2024-11-16T19:29:06,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45445 is added to blk_1073741848_1033 (size=5672) 2024-11-16T19:29:06,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42329 is added to blk_1073741848_1033 (size=5672) 2024-11-16T19:29:06,532 INFO [M:0;d11ab77873cb:45925 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4201753c2005486cbf91268537979dc4 2024-11-16T19:29:06,551 DEBUG [M:0;d11ab77873cb:45925 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/218e592662b440f1b75e1cdd98c37351 is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731785318474/Put/seqid=0 2024-11-16T19:29:06,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42329 is added to blk_1073741849_1034 (size=6117) 2024-11-16T19:29:06,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45445 is added to blk_1073741849_1034 (size=6117) 2024-11-16T19:29:06,556 INFO [M:0;d11ab77873cb:45925 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/218e592662b440f1b75e1cdd98c37351 2024-11-16T19:29:06,571 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T19:29:06,576 DEBUG [M:0;d11ab77873cb:45925 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4cd1bc71b6314a4ab9d3ebf729e76f30 is 69, key is d11ab77873cb,44973,1731785316633/rs:state/1731785316975/Put/seqid=0 2024-11-16T19:29:06,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42329 is added to blk_1073741850_1035 (size=5156) 2024-11-16T19:29:06,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45445 is added to blk_1073741850_1035 (size=5156) 2024-11-16T19:29:06,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44973-0x1004a02354c0001, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:29:06,597 INFO [RS:0;d11ab77873cb:44973 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T19:29:06,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44973-0x1004a02354c0001, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:29:06,597 INFO [RS:0;d11ab77873cb:44973 {}] regionserver.HRegionServer(1031): Exiting; stopping=d11ab77873cb,44973,1731785316633; zookeeper connection closed. 
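The flush output above lands under ".tmp/<family>/" paths and, in the entries that follow, each file is committed by moving it into the family directory ("Committing ... as ..."). A local-filesystem stand-in for that write-to-temp-then-rename pattern is sketched below; paths and names are invented, and atomic-move semantics depend on the underlying filesystem.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    // Sketch of "flush to .tmp, then commit by rename": readers never see a
    // half-written file because it only appears once the move completes.
    public final class TmpCommitExample {
      public static Path flushAndCommit(Path storeDir, String family,
          String fileName, byte[] contents) throws IOException {
        Path tmp = storeDir.resolve(".tmp").resolve(family).resolve(fileName);
        Files.createDirectories(tmp.getParent());
        Files.write(tmp, contents);                     // flush to the temp location

        Path dest = storeDir.resolve(family).resolve(fileName);
        Files.createDirectories(dest.getParent());
        // Atomic on most local filesystems; HDFS relies on rename in much the
        // same way for the commit step logged below.
        return Files.move(tmp, dest, StandardCopyOption.ATOMIC_MOVE);
      }
    }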
2024-11-16T19:29:06,597 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4df0d8b1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4df0d8b1 2024-11-16T19:29:06,598 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T19:29:06,983 INFO [M:0;d11ab77873cb:45925 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4cd1bc71b6314a4ab9d3ebf729e76f30 2024-11-16T19:29:07,009 DEBUG [M:0;d11ab77873cb:45925 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/aeed99bdc33e49f8b50a437c43f19aaf is 52, key is load_balancer_on/state:d/1731785317674/Put/seqid=0 2024-11-16T19:29:07,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42329 is added to blk_1073741851_1036 (size=5056) 2024-11-16T19:29:07,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45445 is added to blk_1073741851_1036 (size=5056) 2024-11-16T19:29:07,015 INFO [M:0;d11ab77873cb:45925 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/aeed99bdc33e49f8b50a437c43f19aaf 2024-11-16T19:29:07,020 DEBUG [M:0;d11ab77873cb:45925 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4201753c2005486cbf91268537979dc4 as hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4201753c2005486cbf91268537979dc4 2024-11-16T19:29:07,026 INFO [M:0;d11ab77873cb:45925 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4201753c2005486cbf91268537979dc4, entries=8, sequenceid=56, filesize=5.5 K 2024-11-16T19:29:07,027 DEBUG [M:0;d11ab77873cb:45925 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/218e592662b440f1b75e1cdd98c37351 as hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/218e592662b440f1b75e1cdd98c37351 2024-11-16T19:29:07,034 INFO [M:0;d11ab77873cb:45925 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/218e592662b440f1b75e1cdd98c37351, entries=6, sequenceid=56, filesize=6.0 K 2024-11-16T19:29:07,035 DEBUG [M:0;d11ab77873cb:45925 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4cd1bc71b6314a4ab9d3ebf729e76f30 as hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4cd1bc71b6314a4ab9d3ebf729e76f30 2024-11-16T19:29:07,042 INFO [M:0;d11ab77873cb:45925 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4cd1bc71b6314a4ab9d3ebf729e76f30, entries=1, sequenceid=56, filesize=5.0 K 2024-11-16T19:29:07,043 DEBUG [M:0;d11ab77873cb:45925 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/aeed99bdc33e49f8b50a437c43f19aaf as hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/aeed99bdc33e49f8b50a437c43f19aaf 2024-11-16T19:29:07,049 INFO [M:0;d11ab77873cb:45925 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/aeed99bdc33e49f8b50a437c43f19aaf, entries=1, sequenceid=56, filesize=4.9 K 2024-11-16T19:29:07,050 INFO [M:0;d11ab77873cb:45925 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 544ms, sequenceid=56, compaction requested=false 2024-11-16T19:29:07,051 INFO [M:0;d11ab77873cb:45925 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:29:07,052 DEBUG [M:0;d11ab77873cb:45925 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731785346506Disabling compacts and flushes for region at 1731785346506Disabling writes for close at 1731785346506Obtaining lock to block concurrent updates at 1731785346506Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731785346506Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1731785346506Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731785346513 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731785346513Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731785346526 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731785346526Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731785346537 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731785346551 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731785346551Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731785346560 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731785346576 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731785346576Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731785346994 (+418 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731785347009 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731785347009Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@594062e9: reopening flushed file at 1731785347019 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3637a24e: reopening flushed file at 1731785347026 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3a5e57b9: reopening flushed file at 1731785347034 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@177c8ba: reopening flushed file at 1731785347042 (+8 ms)Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 544ms, sequenceid=56, compaction requested=false at 1731785347050 (+8 ms)Writing region close event to WAL at 1731785347051 (+1 ms)Closed at 1731785347051 2024-11-16T19:29:07,052 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:07,052 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:07,052 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:07,052 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:07,052 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:07,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45445 is added to blk_1073741847_1031 (size=757) 2024-11-16T19:29:07,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42329 is added to blk_1073741847_1031 (size=757) 2024-11-16T19:29:07,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:07,472 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:07,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:07,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:07,600 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:07,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:07,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:07,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:07,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:07,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:07,604 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:07,604 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:07,604 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:07,606 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:07,610 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:07,610 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:08,113 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T19:29:08,114 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:08,114 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:08,115 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:08,115 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:08,136 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:08,136 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:08,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:08,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:08,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:08,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:08,141 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:08,141 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:08,141 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:08,143 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:08,454 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T19:29:08,455 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T19:29:08,455 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-16T19:29:08,455 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-16T19:29:08,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:29:08,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:09,401 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-16T19:29:09,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:09,474 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:10,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:10,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:29:10,514 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/WALs/d11ab77873cb,45925,1731785316592/d11ab77873cb%2C45925%2C1731785316592.1731785316825 after 4001ms 2024-11-16T19:29:10,515 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/WALs/d11ab77873cb,45925,1731785316592/d11ab77873cb%2C45925%2C1731785316592.1731785316825 to hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/oldWALs/d11ab77873cb%2C45925%2C1731785316592.1731785316825 2024-11-16T19:29:10,519 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/MasterData/oldWALs/d11ab77873cb%2C45925%2C1731785316592.1731785316825 to hdfs://localhost:34363/user/jenkins/test-data/2f8a8de3-7f5b-b4d0-9597-f47c5abb3472/oldWALs/d11ab77873cb%2C45925%2C1731785316592.1731785316825$masterlocalwal$ 2024-11-16T19:29:10,519 INFO [M:0;d11ab77873cb:45925 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T19:29:10,519 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T19:29:10,519 INFO [M:0;d11ab77873cb:45925 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45925 2024-11-16T19:29:10,519 INFO [M:0;d11ab77873cb:45925 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T19:29:10,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:29:10,621 INFO [M:0;d11ab77873cb:45925 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T19:29:10,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45925-0x1004a02354c0000, quorum=127.0.0.1:61191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:29:10,626 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@528eeea6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:29:10,627 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20466b6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:29:10,627 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:29:10,627 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2026736d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:29:10,628 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@760c54cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/hadoop.log.dir/,STOPPED} 2024-11-16T19:29:10,630 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
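Both the repeated "Failed invocation ... Filesystem closed" warnings and the "Recovered lease, attempt=1 on file=... after 4001ms" line come from RecoverLeaseFSUtils, which polls HDFS until a WAL file's lease can be recovered (the failing calls are hitting a DFSClient that has already been closed, hence the wrapped IOException). A rough sketch of that retry pattern, with an illustrative method name rather than the HBase utility's exact signature, might look like:

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  /** Keep nudging the NameNode until the WAL's lease is released or we give up. */
  static boolean recoverLeaseWithRetries(DistributedFileSystem dfs, Path wal,
      int maxAttempts, long pauseMs) throws IOException, InterruptedException {
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      // recoverLease() returns true once the file is closed and the lease has been reclaimed;
      // isFileClosed() covers the case where another actor already closed it.
      if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
        return true; // corresponds to "Recovered lease, attempt=N on file=..." in the log
      }
      Thread.sleep(pauseMs); // lease recovery is asynchronous on the NameNode; poll again
    }
    return false;
  }
}

Once the loop reports success, the archiving step shown in the log is a straightforward move of the recovered WAL into the oldWALs directory.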
2024-11-16T19:29:10,630 WARN [BP-856716490-172.17.0.2-1731785316016 heartbeating to localhost/127.0.0.1:34363 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T19:29:10,630 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T19:29:10,630 WARN [BP-856716490-172.17.0.2-1731785316016 heartbeating to localhost/127.0.0.1:34363 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-856716490-172.17.0.2-1731785316016 (Datanode Uuid 4bc3b464-663d-4517-a754-fe886e518a90) service to localhost/127.0.0.1:34363 2024-11-16T19:29:10,631 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/cluster_032fc1cc-3d9e-2961-e04e-0405007a6b3d/data/data3/current/BP-856716490-172.17.0.2-1731785316016 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:29:10,632 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/cluster_032fc1cc-3d9e-2961-e04e-0405007a6b3d/data/data4/current/BP-856716490-172.17.0.2-1731785316016 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:29:10,632 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T19:29:10,635 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ca8564b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:29:10,635 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@fa662a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:29:10,635 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:29:10,635 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@167fd01b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:29:10,635 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@718ea2f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/hadoop.log.dir/,STOPPED} 2024-11-16T19:29:10,637 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
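The WARN lines from the refreshUsed and heartbeat threads during datanode shutdown are the normal interrupt-driven exit of background loops: the shutdown path interrupts them, their sleep throws InterruptedException, and they log once and stop. A generic sketch of that pattern (not Hadoop's CachingGetSpaceUsed implementation) is:

public final class RefreshThreadSketch {
  public static void main(String[] args) throws InterruptedException {
    Thread refresher = new Thread(() -> {
      while (!Thread.currentThread().isInterrupted()) {
        try {
          // Pretend to refresh disk-usage information, then sleep until the next cycle.
          Thread.sleep(1_000L);
        } catch (InterruptedException e) {
          // Shutdown path: note the interrupt and exit, mirroring "sleep interrupted" above.
          System.out.println("refresh thread interrupted, exiting");
          Thread.currentThread().interrupt(); // restore the flag so the loop condition ends
        }
      }
    }, "refreshUsed-sketch");
    refresher.start();
    Thread.sleep(100L);
    refresher.interrupt(); // the shutdown hook interrupts background refreshers like this
    refresher.join();
  }
}

The same interrupt-driven wind-down is what the "Ending command processor service" and "Ending block pool service" messages report for the datanode's other service threads.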
2024-11-16T19:29:10,637 WARN [BP-856716490-172.17.0.2-1731785316016 heartbeating to localhost/127.0.0.1:34363 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T19:29:10,637 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T19:29:10,637 WARN [BP-856716490-172.17.0.2-1731785316016 heartbeating to localhost/127.0.0.1:34363 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-856716490-172.17.0.2-1731785316016 (Datanode Uuid 1fc598ff-773e-473c-adca-22e7b3ecc203) service to localhost/127.0.0.1:34363 2024-11-16T19:29:10,637 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/cluster_032fc1cc-3d9e-2961-e04e-0405007a6b3d/data/data1/current/BP-856716490-172.17.0.2-1731785316016 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:29:10,638 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/cluster_032fc1cc-3d9e-2961-e04e-0405007a6b3d/data/data2/current/BP-856716490-172.17.0.2-1731785316016 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:29:10,638 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T19:29:10,643 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1915705e{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T19:29:10,643 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@17b2a9ba{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:29:10,643 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:29:10,643 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27a49013{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:29:10,643 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7bd03e52{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/hadoop.log.dir/,STOPPED} 2024-11-16T19:29:10,649 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T19:29:10,667 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T19:29:10,673 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=181 (was 155) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:34363 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
LeaseRenewer:jenkins@localhost:34363 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34363 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34363 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34363 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: HMaster-EventLoopGroup-10-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Timer for 'DataNode' metrics system
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34363
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34363
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-10-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-33-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-10-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34363 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-30-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-32-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-33-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=457 (was 452) - OpenFileDescriptor LEAK?
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=109 (was 140), ProcessCount=11 (was 11), AvailableMemoryMB=2191 (was 2401) 2024-11-16T19:29:10,680 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=181, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=109, ProcessCount=11, AvailableMemoryMB=2191 2024-11-16T19:29:10,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T19:29:10,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/hadoop.log.dir so I do NOT create it in target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49 2024-11-16T19:29:10,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c2f309fd-a6b8-bfce-4a69-1d58d75a3d34/hadoop.tmp.dir so I do NOT create it in target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49 2024-11-16T19:29:10,681 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/cluster_16e972a0-4f45-7c9c-d7b9-4b3aeba5bcef, deleteOnExit=true 2024-11-16T19:29:10,681 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T19:29:10,681 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/test.cache.data in system properties and HBase conf 2024-11-16T19:29:10,681 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T19:29:10,681 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/hadoop.log.dir in system properties and HBase conf 2024-11-16T19:29:10,681 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T19:29:10,681 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T19:29:10,681 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T19:29:10,681 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file 
system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-16T19:29:10,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T19:29:10,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T19:29:10,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T19:29:10,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T19:29:10,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T19:29:10,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T19:29:10,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T19:29:10,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T19:29:10,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T19:29:10,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/nfs.dump.dir in system properties and HBase conf 2024-11-16T19:29:10,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/java.io.tmpdir in system properties and HBase conf 2024-11-16T19:29:10,683 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T19:29:10,683 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T19:29:10,683 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T19:29:10,695 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T19:29:10,748 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:29:10,752 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:29:10,754 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:29:10,754 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:29:10,754 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T19:29:10,755 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:29:10,755 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78b6351{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:29:10,755 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bbcc3bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:29:10,856 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@232381c8{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/java.io.tmpdir/jetty-localhost-43831-hadoop-hdfs-3_4_1-tests_jar-_-any-13052567557576106349/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T19:29:10,857 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@290f188f{HTTP/1.1, (http/1.1)}{localhost:43831} 2024-11-16T19:29:10,857 INFO [Time-limited test {}] server.Server(415): Started @177511ms 2024-11-16T19:29:10,868 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T19:29:10,920 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:29:10,923 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:29:10,924 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:29:10,924 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:29:10,924 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T19:29:10,925 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b3167c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:29:10,925 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa328f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:29:11,018 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2d027a3b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/java.io.tmpdir/jetty-localhost-44203-hadoop-hdfs-3_4_1-tests_jar-_-any-16566668528135830595/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:29:11,019 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5b5b6aa1{HTTP/1.1, (http/1.1)}{localhost:44203} 2024-11-16T19:29:11,019 INFO [Time-limited test {}] server.Server(415): Started @177673ms 2024-11-16T19:29:11,020 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T19:29:11,046 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:29:11,049 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:29:11,050 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:29:11,050 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:29:11,050 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T19:29:11,050 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b24fbcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:29:11,050 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16aeea80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:29:11,075 WARN [Thread-1628 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/cluster_16e972a0-4f45-7c9c-d7b9-4b3aeba5bcef/data/data1/current/BP-780948166-172.17.0.2-1731785350708/current, will proceed with Du for space computation calculation, 2024-11-16T19:29:11,076 WARN [Thread-1629 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/cluster_16e972a0-4f45-7c9c-d7b9-4b3aeba5bcef/data/data2/current/BP-780948166-172.17.0.2-1731785350708/current, will proceed with Du for space computation calculation, 2024-11-16T19:29:11,089 WARN [Thread-1607 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T19:29:11,092 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x203bb081b30a763e with lease ID 0xe70252515cade341: Processing first storage report for DS-92c6fad4-27b8-42b7-b0d9-e13d6f6b0126 from datanode DatanodeRegistration(127.0.0.1:39823, datanodeUuid=cc89cd6c-53ac-40af-8b4c-ce6898d285e9, infoPort=45053, infoSecurePort=0, ipcPort=45079, storageInfo=lv=-57;cid=testClusterID;nsid=2083543628;c=1731785350708) 2024-11-16T19:29:11,092 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x203bb081b30a763e with lease ID 0xe70252515cade341: from storage DS-92c6fad4-27b8-42b7-b0d9-e13d6f6b0126 node DatanodeRegistration(127.0.0.1:39823, datanodeUuid=cc89cd6c-53ac-40af-8b4c-ce6898d285e9, infoPort=45053, infoSecurePort=0, ipcPort=45079, storageInfo=lv=-57;cid=testClusterID;nsid=2083543628;c=1731785350708), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T19:29:11,092 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x203bb081b30a763e with lease ID 0xe70252515cade341: Processing first storage report for DS-1303d030-9eb7-4e6d-a128-b9aef81653c4 from datanode DatanodeRegistration(127.0.0.1:39823, datanodeUuid=cc89cd6c-53ac-40af-8b4c-ce6898d285e9, infoPort=45053, infoSecurePort=0, ipcPort=45079, storageInfo=lv=-57;cid=testClusterID;nsid=2083543628;c=1731785350708) 2024-11-16T19:29:11,092 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x203bb081b30a763e with lease ID 0xe70252515cade341: from storage DS-1303d030-9eb7-4e6d-a128-b9aef81653c4 node DatanodeRegistration(127.0.0.1:39823, datanodeUuid=cc89cd6c-53ac-40af-8b4c-ce6898d285e9, infoPort=45053, infoSecurePort=0, ipcPort=45079, storageInfo=lv=-57;cid=testClusterID;nsid=2083543628;c=1731785350708), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:29:11,146 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5d8d7f9b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/java.io.tmpdir/jetty-localhost-37459-hadoop-hdfs-3_4_1-tests_jar-_-any-12990093488047717321/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:29:11,146 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@190ad9e7{HTTP/1.1, (http/1.1)}{localhost:37459} 2024-11-16T19:29:11,146 INFO [Time-limited test {}] server.Server(415): Started @177801ms 2024-11-16T19:29:11,148 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
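[Editor's note] The block reports above come from the 2-datanode HDFS minicluster the test brings up before HBase itself (see "STARTING DFS" and numDataNodes=2 earlier). As a minimal, hedged sketch of that kind of setup, assuming the hadoop-hdfs test artifact is on the classpath, and using a made-up base directory rather than the test's own target/test-data path, a two-datanode MiniDFSCluster can be started like this:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical base dir; the test above derives its own under target/test-data.
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-sketch");

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)   // matches numDataNodes=2 in the StartMiniClusterOption above
        .build();
    try {
      cluster.waitActive();                  // block until both datanodes have registered
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/user/jenkins"));  // illustrative path only
      System.out.println("NameNode RPC port: " + cluster.getNameNodePort());
    } finally {
      cluster.shutdown();
    }
  }
}
```

Once waitActive() returns, each datanode has sent the kind of first block report that the BlockManager lines above record.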
2024-11-16T19:29:11,200 WARN [Thread-1655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/cluster_16e972a0-4f45-7c9c-d7b9-4b3aeba5bcef/data/data4/current/BP-780948166-172.17.0.2-1731785350708/current, will proceed with Du for space computation calculation, 2024-11-16T19:29:11,200 WARN [Thread-1654 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/cluster_16e972a0-4f45-7c9c-d7b9-4b3aeba5bcef/data/data3/current/BP-780948166-172.17.0.2-1731785350708/current, will proceed with Du for space computation calculation, 2024-11-16T19:29:11,220 WARN [Thread-1643 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T19:29:11,222 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdf7f046a90b2614d with lease ID 0xe70252515cade342: Processing first storage report for DS-a176e4ad-625f-40d5-a77e-2f82b2f6ac0e from datanode DatanodeRegistration(127.0.0.1:38639, datanodeUuid=56eb494f-fe7b-4c24-9106-5f68b9058e64, infoPort=39465, infoSecurePort=0, ipcPort=36583, storageInfo=lv=-57;cid=testClusterID;nsid=2083543628;c=1731785350708) 2024-11-16T19:29:11,222 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdf7f046a90b2614d with lease ID 0xe70252515cade342: from storage DS-a176e4ad-625f-40d5-a77e-2f82b2f6ac0e node DatanodeRegistration(127.0.0.1:38639, datanodeUuid=56eb494f-fe7b-4c24-9106-5f68b9058e64, infoPort=39465, infoSecurePort=0, ipcPort=36583, storageInfo=lv=-57;cid=testClusterID;nsid=2083543628;c=1731785350708), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:29:11,222 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdf7f046a90b2614d with lease ID 0xe70252515cade342: Processing first storage report for DS-65804837-5e28-4b70-9e9d-dd87d8e88f0e from datanode DatanodeRegistration(127.0.0.1:38639, datanodeUuid=56eb494f-fe7b-4c24-9106-5f68b9058e64, infoPort=39465, infoSecurePort=0, ipcPort=36583, storageInfo=lv=-57;cid=testClusterID;nsid=2083543628;c=1731785350708) 2024-11-16T19:29:11,222 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdf7f046a90b2614d with lease ID 0xe70252515cade342: from storage DS-65804837-5e28-4b70-9e9d-dd87d8e88f0e node DatanodeRegistration(127.0.0.1:38639, datanodeUuid=56eb494f-fe7b-4c24-9106-5f68b9058e64, infoPort=39465, infoSecurePort=0, ipcPort=36583, storageInfo=lv=-57;cid=testClusterID;nsid=2083543628;c=1731785350708), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:29:11,272 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49 2024-11-16T19:29:11,276 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/cluster_16e972a0-4f45-7c9c-d7b9-4b3aeba5bcef/zookeeper_0, clientPort=51672, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/cluster_16e972a0-4f45-7c9c-d7b9-4b3aeba5bcef/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/cluster_16e972a0-4f45-7c9c-d7b9-4b3aeba5bcef/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T19:29:11,277 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51672 2024-11-16T19:29:11,277 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:29:11,278 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:29:11,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741825_1001 (size=7) 2024-11-16T19:29:11,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741825_1001 (size=7) 2024-11-16T19:29:11,287 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef with version=8 2024-11-16T19:29:11,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/hbase-staging 2024-11-16T19:29:11,289 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d11ab77873cb:0 server-side Connection retries=45 2024-11-16T19:29:11,289 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:29:11,289 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T19:29:11,289 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T19:29:11,289 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:29:11,289 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T19:29:11,289 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T19:29:11,289 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T19:29:11,290 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40933 2024-11-16T19:29:11,291 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40933 connecting to ZooKeeper ensemble=127.0.0.1:51672 2024-11-16T19:29:11,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:409330x0, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T19:29:11,295 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40933-0x1004a02bcd90000 connected 2024-11-16T19:29:11,307 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:29:11,309 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:29:11,311 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:29:11,311 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef, hbase.cluster.distributed=false 2024-11-16T19:29:11,314 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T19:29:11,315 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40933 2024-11-16T19:29:11,315 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40933 2024-11-16T19:29:11,316 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40933 2024-11-16T19:29:11,316 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40933 2024-11-16T19:29:11,316 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40933 2024-11-16T19:29:11,335 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d11ab77873cb:0 server-side Connection retries=45 2024-11-16T19:29:11,335 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:29:11,335 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T19:29:11,336 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T19:29:11,336 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:29:11,336 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T19:29:11,336 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T19:29:11,336 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T19:29:11,336 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38717 2024-11-16T19:29:11,338 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38717 connecting to ZooKeeper ensemble=127.0.0.1:51672 2024-11-16T19:29:11,338 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:29:11,339 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:29:11,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:387170x0, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T19:29:11,343 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:387170x0, quorum=127.0.0.1:51672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:29:11,343 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38717-0x1004a02bcd90001 connected 2024-11-16T19:29:11,343 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T19:29:11,344 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T19:29:11,344 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38717-0x1004a02bcd90001, quorum=127.0.0.1:51672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T19:29:11,345 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38717-0x1004a02bcd90001, quorum=127.0.0.1:51672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T19:29:11,345 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38717 2024-11-16T19:29:11,345 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38717 2024-11-16T19:29:11,346 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38717 2024-11-16T19:29:11,346 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38717 2024-11-16T19:29:11,346 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38717 2024-11-16T19:29:11,356 
DEBUG [M:0;d11ab77873cb:40933 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d11ab77873cb:40933 2024-11-16T19:29:11,356 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d11ab77873cb,40933,1731785351288 2024-11-16T19:29:11,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:29:11,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38717-0x1004a02bcd90001, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:29:11,358 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d11ab77873cb,40933,1731785351288 2024-11-16T19:29:11,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38717-0x1004a02bcd90001, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T19:29:11,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:29:11,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38717-0x1004a02bcd90001, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:29:11,359 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T19:29:11,360 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d11ab77873cb,40933,1731785351288 from backup master directory 2024-11-16T19:29:11,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d11ab77873cb,40933,1731785351288 2024-11-16T19:29:11,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38717-0x1004a02bcd90001, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:29:11,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:29:11,360 WARN [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
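[Editor's note] The ZKWatcher events above show the master registering itself under the /hbase base znode of the MiniZooKeeperCluster listening on 127.0.0.1:51672. A small sketch of how those znodes could be inspected with the plain ZooKeeper client; the port is the one reported earlier in this log, everything else (class name, paths printed) is illustrative and not part of the test:

```java
import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkInspectSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // 127.0.0.1:51672 is the client port reported by MiniZooKeeperCluster above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:51672", 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    try {
      connected.await();
      // /hbase is the baseZNode shown in the ZKWatcher lines; once the cluster is up
      // its children include master, backup-masters, rs, ...
      List<String> children = zk.getChildren("/hbase", false);
      System.out.println("children of /hbase: " + children);
      // /hbase/master holds the serialized ServerName of the active master
      // (throws NoNodeException if no master has registered yet).
      byte[] master = zk.getData("/hbase/master", false, null);
      System.out.println("/hbase/master payload length: " + master.length);
    } finally {
      zk.close();
    }
  }
}
```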
2024-11-16T19:29:11,360 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d11ab77873cb,40933,1731785351288 2024-11-16T19:29:11,364 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/hbase.id] with ID: 36acdc00-333b-4fd1-8171-cc88c665bb85 2024-11-16T19:29:11,364 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/.tmp/hbase.id 2024-11-16T19:29:11,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741826_1002 (size=42) 2024-11-16T19:29:11,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741826_1002 (size=42) 2024-11-16T19:29:11,372 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/.tmp/hbase.id]:[hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/hbase.id] 2024-11-16T19:29:11,382 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:29:11,382 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T19:29:11,383 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
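[Editor's note] The FSUtils lines above write the cluster ID to a temporary file and then move it to its final hbase.id location, so a reader never observes a half-written file. A hedged sketch of that write-then-rename pattern using the Hadoop FileSystem API; the root path and class name here are hypothetical, only the UUID is the one logged above:

```java
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteThenRenameSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical root; the test uses hdfs://localhost:43173/user/jenkins/test-data/...
    Path root = new Path("hdfs://localhost:43173/tmp/sketch");
    FileSystem fs = root.getFileSystem(conf);

    Path tmp = new Path(root, ".tmp/hbase.id");
    Path target = new Path(root, "hbase.id");

    // Write the content to a temporary file first...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("36acdc00-333b-4fd1-8171-cc88c665bb85".getBytes(StandardCharsets.UTF_8));
    }
    // ...then move it into place in a single rename, so the target is either
    // absent or complete, never partially written.
    if (!fs.rename(tmp, target)) {
      throw new IllegalStateException("rename failed: " + tmp + " -> " + target);
    }
  }
}
```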
2024-11-16T19:29:11,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:29:11,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38717-0x1004a02bcd90001, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:29:11,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741827_1003 (size=196) 2024-11-16T19:29:11,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741827_1003 (size=196) 2024-11-16T19:29:11,391 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T19:29:11,391 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T19:29:11,392 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T19:29:11,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741828_1004 (size=1189) 2024-11-16T19:29:11,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741828_1004 (size=1189) 2024-11-16T19:29:11,402 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store 2024-11-16T19:29:11,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741829_1005 (size=34) 2024-11-16T19:29:11,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741829_1005 (size=34) 2024-11-16T19:29:11,409 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:29:11,409 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T19:29:11,409 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:29:11,409 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:29:11,409 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T19:29:11,409 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:29:11,409 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
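[Editor's note] The master:store descriptor logged above defines four column families (info, proc, rs, state): info keeps 3 in-memory versions with 8 KB blocks, ROW_INDEX_V1 encoding and a ROWCOL bloom filter, while proc, rs and state use single-version, 64 KB, ROW-bloom defaults. That descriptor is built internally by MasterRegion; the sketch below only shows how an equivalent descriptor could be assembled with the public client API, and is not the actual MasterRegion code path:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    // 'info' mirrors the logged settings: 3 versions, in-memory, 8 KB blocks,
    // ROW_INDEX_V1 encoding, ROWCOL bloom filter.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .build();

    TableDescriptorBuilder builder = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info);

    // 'proc', 'rs' and 'state' use the defaults shown in the log:
    // 1 version, 64 KB blocks, ROW bloom filter, no block encoding.
    for (String family : new String[] { "proc", "rs", "state" }) {
      builder.setColumnFamily(ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)
          .setBloomFilterType(BloomType.ROW)
          .build());
    }

    TableDescriptor masterStore = builder.build();
    System.out.println(masterStore);
  }
}
```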
2024-11-16T19:29:11,409 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731785351409Disabling compacts and flushes for region at 1731785351409Disabling writes for close at 1731785351409Writing region close event to WAL at 1731785351409Closed at 1731785351409 2024-11-16T19:29:11,410 WARN [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/.initializing 2024-11-16T19:29:11,410 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/WALs/d11ab77873cb,40933,1731785351288 2024-11-16T19:29:11,412 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d11ab77873cb%2C40933%2C1731785351288, suffix=, logDir=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/WALs/d11ab77873cb,40933,1731785351288, archiveDir=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/oldWALs, maxLogs=10 2024-11-16T19:29:11,412 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C40933%2C1731785351288.1731785351412 2024-11-16T19:29:11,417 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/WALs/d11ab77873cb,40933,1731785351288/d11ab77873cb%2C40933%2C1731785351288.1731785351412 2024-11-16T19:29:11,420 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45053:45053),(127.0.0.1/127.0.0.1:39465:39465)] 2024-11-16T19:29:11,421 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T19:29:11,421 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:29:11,421 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:29:11,421 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:29:11,423 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:29:11,424 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T19:29:11,424 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:29:11,424 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:29:11,424 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:29:11,425 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T19:29:11,425 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:29:11,426 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:29:11,426 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:29:11,427 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T19:29:11,427 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:29:11,427 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:29:11,427 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:29:11,429 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T19:29:11,429 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:29:11,429 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:29:11,430 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:29:11,430 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:29:11,431 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:29:11,432 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:29:11,432 DEBUG [master/d11ab77873cb:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:29:11,433 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T19:29:11,434 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:29:11,436 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T19:29:11,436 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=727344, jitterRate=-0.07513441145420074}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T19:29:11,437 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731785351421Initializing all the Stores at 1731785351422 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785351422Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785351422Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785351422Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785351422Cleaning up temporary data from old regions at 1731785351432 (+10 ms)Region opened successfully at 1731785351436 (+4 ms) 2024-11-16T19:29:11,437 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T19:29:11,439 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6117ca5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d11ab77873cb/172.17.0.2:0 2024-11-16T19:29:11,440 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T19:29:11,440 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T19:29:11,440 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T19:29:11,441 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T19:29:11,441 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T19:29:11,441 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T19:29:11,441 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T19:29:11,443 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T19:29:11,444 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T19:29:11,445 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T19:29:11,445 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T19:29:11,446 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T19:29:11,446 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T19:29:11,447 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T19:29:11,447 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T19:29:11,448 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T19:29:11,448 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T19:29:11,449 DEBUG 
[master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T19:29:11,451 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T19:29:11,451 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T19:29:11,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T19:29:11,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38717-0x1004a02bcd90001, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T19:29:11,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:29:11,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38717-0x1004a02bcd90001, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:29:11,452 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d11ab77873cb,40933,1731785351288, sessionid=0x1004a02bcd90000, setting cluster-up flag (Was=false) 2024-11-16T19:29:11,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38717-0x1004a02bcd90001, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:29:11,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:29:11,456 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T19:29:11,457 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d11ab77873cb,40933,1731785351288 2024-11-16T19:29:11,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38717-0x1004a02bcd90001, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:29:11,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:29:11,461 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T19:29:11,462 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d11ab77873cb,40933,1731785351288 2024-11-16T19:29:11,463 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T19:29:11,465 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T19:29:11,465 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T19:29:11,465 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T19:29:11,465 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d11ab77873cb,40933,1731785351288 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T19:29:11,466 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:29:11,466 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:29:11,466 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:29:11,466 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:29:11,467 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d11ab77873cb:0, corePoolSize=10, maxPoolSize=10 2024-11-16T19:29:11,467 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:29:11,467 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d11ab77873cb:0, corePoolSize=2, maxPoolSize=2 2024-11-16T19:29:11,467 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d11ab77873cb:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T19:29:11,468 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731785381468 2024-11-16T19:29:11,468 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T19:29:11,468 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T19:29:11,468 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T19:29:11,468 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T19:29:11,468 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T19:29:11,468 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T19:29:11,468 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T19:29:11,468 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T19:29:11,468 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T19:29:11,468 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T19:29:11,468 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T19:29:11,468 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T19:29:11,469 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T19:29:11,469 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T19:29:11,469 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.large.0-1731785351469,5,FailOnTimeoutGroup] 2024-11-16T19:29:11,469 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.small.0-1731785351469,5,FailOnTimeoutGroup] 2024-11-16T19:29:11,469 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T19:29:11,469 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T19:29:11,469 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T19:29:11,469 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-16T19:29:11,469 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:29:11,470 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T19:29:11,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:11,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741831_1007 (size=1321) 2024-11-16T19:29:11,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741831_1007 (size=1321) 2024-11-16T19:29:11,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:11,477 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T19:29:11,477 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef 2024-11-16T19:29:11,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741832_1008 (size=32) 2024-11-16T19:29:11,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741832_1008 (size=32) 2024-11-16T19:29:11,484 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:29:11,485 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T19:29:11,486 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, 
min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T19:29:11,486 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:29:11,486 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:29:11,486 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T19:29:11,487 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T19:29:11,488 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:29:11,488 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:29:11,488 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T19:29:11,489 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T19:29:11,489 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:29:11,489 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:29:11,489 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T19:29:11,490 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T19:29:11,490 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:29:11,491 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:29:11,491 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T19:29:11,492 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/hbase/meta/1588230740 2024-11-16T19:29:11,492 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/hbase/meta/1588230740 2024-11-16T19:29:11,493 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T19:29:11,493 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T19:29:11,494 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
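Note on the FlushLargeStoresPolicy(65) fallback just above: when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset, the lower bound becomes the region's memstore flush heap size divided by its number of column families. A minimal sketch of that arithmetic, using only values printed in this log (the class below is hypothetical, not an HBase API; the 64 MB meta flush size is the value implied by flushSizeLowerBound=16777216 in the following entry):

    // Hypothetical illustration of the fallback logged by FlushLargeStoresPolicy(65):
    // lower bound = memstore flush heap size / number of column families.
    public class FlushLowerBoundSketch {
        static long lowerBound(long memStoreFlushHeapSize, int numFamilies) {
            return memStoreFlushHeapSize / numFamilies;
        }

        public static void main(String[] args) {
            // master:store region (families: info, proc, rs, state), 128 MB flush size
            System.out.println(lowerBound(134_217_728L, 4)); // 33554432 -> "32.0 M" in the log
            // hbase:meta region (families: info, ns, rep_barrier, table), implied 64 MB flush size
            System.out.println(lowerBound(67_108_864L, 4));  // 16777216 -> "16.0 M" in the log
        }
    }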
2024-11-16T19:29:11,495 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T19:29:11,497 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T19:29:11,498 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=715071, jitterRate=-0.09074057638645172}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T19:29:11,499 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731785351484Initializing all the Stores at 1731785351484Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785351484Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785351485 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785351485Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785351485Cleaning up temporary data from old regions at 1731785351493 (+8 ms)Region opened successfully at 1731785351499 (+6 ms) 2024-11-16T19:29:11,500 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T19:29:11,500 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T19:29:11,500 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T19:29:11,500 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T19:29:11,500 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T19:29:11,500 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T19:29:11,500 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731785351499Disabling compacts and flushes for region at 1731785351499Disabling writes for close at 1731785351500 (+1 ms)Writing 
region close event to WAL at 1731785351500Closed at 1731785351500 2024-11-16T19:29:11,502 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T19:29:11,502 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T19:29:11,502 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T19:29:11,504 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T19:29:11,505 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T19:29:11,548 INFO [RS:0;d11ab77873cb:38717 {}] regionserver.HRegionServer(746): ClusterId : 36acdc00-333b-4fd1-8171-cc88c665bb85 2024-11-16T19:29:11,548 DEBUG [RS:0;d11ab77873cb:38717 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T19:29:11,550 DEBUG [RS:0;d11ab77873cb:38717 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T19:29:11,550 DEBUG [RS:0;d11ab77873cb:38717 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T19:29:11,551 DEBUG [RS:0;d11ab77873cb:38717 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T19:29:11,551 DEBUG [RS:0;d11ab77873cb:38717 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@324ab6cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d11ab77873cb/172.17.0.2:0 2024-11-16T19:29:11,561 DEBUG [RS:0;d11ab77873cb:38717 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d11ab77873cb:38717 2024-11-16T19:29:11,561 INFO [RS:0;d11ab77873cb:38717 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T19:29:11,561 INFO [RS:0;d11ab77873cb:38717 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T19:29:11,561 DEBUG [RS:0;d11ab77873cb:38717 {}] regionserver.HRegionServer(832): About to register with Master. 
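The "Region open journal" entry for 1588230740 above records its timeline as raw epoch milliseconds with per-step "(+N ms)" deltas. A small, hypothetical decoder (not part of HBase) that reproduces those deltas from representative timestamps printed in that journal:

    import java.time.Instant;

    // Hypothetical decoder for the epoch-millisecond timestamps in the region open journal.
    public class JournalDeltaSketch {
        public static void main(String[] args) {
            long[] steps = {1731785351484L, 1731785351485L, 1731785351493L, 1731785351499L};
            String[] labels = {"writing region info", "instantiating stores",
                               "cleaning up temporary data", "region opened"};
            long prev = steps[0];
            for (int i = 0; i < steps.length; i++) {
                System.out.printf("%-28s %s (+%d ms)%n",
                        labels[i], Instant.ofEpochMilli(steps[i]), steps[i] - prev);
                prev = steps[i];
            }
        }
    }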
2024-11-16T19:29:11,562 INFO [RS:0;d11ab77873cb:38717 {}] regionserver.HRegionServer(2659): reportForDuty to master=d11ab77873cb,40933,1731785351288 with port=38717, startcode=1731785351335 2024-11-16T19:29:11,562 DEBUG [RS:0;d11ab77873cb:38717 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T19:29:11,564 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37791, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T19:29:11,564 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40933 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d11ab77873cb,38717,1731785351335 2024-11-16T19:29:11,564 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40933 {}] master.ServerManager(517): Registering regionserver=d11ab77873cb,38717,1731785351335 2024-11-16T19:29:11,566 DEBUG [RS:0;d11ab77873cb:38717 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef 2024-11-16T19:29:11,566 DEBUG [RS:0;d11ab77873cb:38717 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43173 2024-11-16T19:29:11,566 DEBUG [RS:0;d11ab77873cb:38717 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T19:29:11,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T19:29:11,568 DEBUG [RS:0;d11ab77873cb:38717 {}] zookeeper.ZKUtil(111): regionserver:38717-0x1004a02bcd90001, quorum=127.0.0.1:51672, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d11ab77873cb,38717,1731785351335 2024-11-16T19:29:11,568 WARN [RS:0;d11ab77873cb:38717 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T19:29:11,568 INFO [RS:0;d11ab77873cb:38717 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T19:29:11,568 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d11ab77873cb,38717,1731785351335] 2024-11-16T19:29:11,568 DEBUG [RS:0;d11ab77873cb:38717 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/WALs/d11ab77873cb,38717,1731785351335 2024-11-16T19:29:11,572 INFO [RS:0;d11ab77873cb:38717 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T19:29:11,574 INFO [RS:0;d11ab77873cb:38717 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T19:29:11,574 INFO [RS:0;d11ab77873cb:38717 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T19:29:11,574 INFO [RS:0;d11ab77873cb:38717 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
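In the MemStoreFlusher(131) entry above, the low-water mark is a fixed fraction of the global memstore limit: 836 M is 95% of 880 M. A sketch of that relationship; the key name hbase.regionserver.global.memstore.size.lower.limit and its 0.95 default are assumptions, while the numbers come from the log:

    // Sketch of the relationship printed by MemStoreFlusher(131); key names are assumptions.
    public class MemStoreLimitSketch {
        public static void main(String[] args) {
            double globalLimitMb = 880.0;   // globalMemStoreLimit=880 M in the log
            double lowerFraction = 0.95;    // assumed lower-limit fraction of the upper bound
            System.out.printf("low mark = %.0f M%n", globalLimitMb * lowerFraction); // 836 M
        }
    }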
2024-11-16T19:29:11,574 INFO [RS:0;d11ab77873cb:38717 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T19:29:11,575 INFO [RS:0;d11ab77873cb:38717 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T19:29:11,575 INFO [RS:0;d11ab77873cb:38717 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T19:29:11,575 DEBUG [RS:0;d11ab77873cb:38717 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:29:11,575 DEBUG [RS:0;d11ab77873cb:38717 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:29:11,575 DEBUG [RS:0;d11ab77873cb:38717 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:29:11,575 DEBUG [RS:0;d11ab77873cb:38717 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:29:11,575 DEBUG [RS:0;d11ab77873cb:38717 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:29:11,575 DEBUG [RS:0;d11ab77873cb:38717 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d11ab77873cb:0, corePoolSize=2, maxPoolSize=2 2024-11-16T19:29:11,575 DEBUG [RS:0;d11ab77873cb:38717 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:29:11,575 DEBUG [RS:0;d11ab77873cb:38717 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:29:11,576 DEBUG [RS:0;d11ab77873cb:38717 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:29:11,576 DEBUG [RS:0;d11ab77873cb:38717 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:29:11,576 DEBUG [RS:0;d11ab77873cb:38717 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:29:11,576 DEBUG [RS:0;d11ab77873cb:38717 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:29:11,576 DEBUG [RS:0;d11ab77873cb:38717 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d11ab77873cb:0, corePoolSize=3, maxPoolSize=3 2024-11-16T19:29:11,576 DEBUG [RS:0;d11ab77873cb:38717 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0, corePoolSize=3, maxPoolSize=3 2024-11-16T19:29:11,576 INFO [RS:0;d11ab77873cb:38717 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
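The ExecutorService(95) and ChoreService(168) entries above describe fixed-size worker pools and fixed-rate chores, for example CompactionChecker at period=1000 ms. As an analogy only (not the HBase ChoreService API), the same scheduling shape in plain java.util.concurrent:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Analogy for a ScheduledChore with period=1000, unit=MILLISECONDS (not HBase code).
    public class ChoreAnalogy {
        public static void main(String[] args) throws InterruptedException {
            ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();
            chorePool.scheduleAtFixedRate(
                    () -> System.out.println("CompactionChecker tick"),
                    0, 1000, TimeUnit.MILLISECONDS);
            Thread.sleep(3_000);     // let a few ticks fire
            chorePool.shutdownNow();
        }
    }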
2024-11-16T19:29:11,576 INFO [RS:0;d11ab77873cb:38717 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T19:29:11,576 INFO [RS:0;d11ab77873cb:38717 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:29:11,576 INFO [RS:0;d11ab77873cb:38717 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T19:29:11,576 INFO [RS:0;d11ab77873cb:38717 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T19:29:11,577 INFO [RS:0;d11ab77873cb:38717 {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,38717,1731785351335-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T19:29:11,589 INFO [RS:0;d11ab77873cb:38717 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T19:29:11,590 INFO [RS:0;d11ab77873cb:38717 {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,38717,1731785351335-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:29:11,590 INFO [RS:0;d11ab77873cb:38717 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:29:11,590 INFO [RS:0;d11ab77873cb:38717 {}] regionserver.Replication(171): d11ab77873cb,38717,1731785351335 started 2024-11-16T19:29:11,603 INFO [RS:0;d11ab77873cb:38717 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:29:11,603 INFO [RS:0;d11ab77873cb:38717 {}] regionserver.HRegionServer(1482): Serving as d11ab77873cb,38717,1731785351335, RpcServer on d11ab77873cb/172.17.0.2:38717, sessionid=0x1004a02bcd90001 2024-11-16T19:29:11,603 DEBUG [RS:0;d11ab77873cb:38717 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T19:29:11,603 DEBUG [RS:0;d11ab77873cb:38717 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d11ab77873cb,38717,1731785351335 2024-11-16T19:29:11,603 DEBUG [RS:0;d11ab77873cb:38717 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd11ab77873cb,38717,1731785351335' 2024-11-16T19:29:11,603 DEBUG [RS:0;d11ab77873cb:38717 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T19:29:11,604 DEBUG [RS:0;d11ab77873cb:38717 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T19:29:11,604 DEBUG [RS:0;d11ab77873cb:38717 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T19:29:11,604 DEBUG [RS:0;d11ab77873cb:38717 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T19:29:11,604 DEBUG [RS:0;d11ab77873cb:38717 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d11ab77873cb,38717,1731785351335 2024-11-16T19:29:11,604 DEBUG [RS:0;d11ab77873cb:38717 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd11ab77873cb,38717,1731785351335' 2024-11-16T19:29:11,604 DEBUG [RS:0;d11ab77873cb:38717 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T19:29:11,605 DEBUG 
[RS:0;d11ab77873cb:38717 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T19:29:11,605 DEBUG [RS:0;d11ab77873cb:38717 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T19:29:11,605 INFO [RS:0;d11ab77873cb:38717 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T19:29:11,605 INFO [RS:0;d11ab77873cb:38717 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T19:29:11,655 WARN [d11ab77873cb:40933 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-16T19:29:11,707 INFO [RS:0;d11ab77873cb:38717 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d11ab77873cb%2C38717%2C1731785351335, suffix=, logDir=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/WALs/d11ab77873cb,38717,1731785351335, archiveDir=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/oldWALs, maxLogs=32 2024-11-16T19:29:11,707 INFO [RS:0;d11ab77873cb:38717 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C38717%2C1731785351335.1731785351707 2024-11-16T19:29:11,713 INFO [RS:0;d11ab77873cb:38717 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/WALs/d11ab77873cb,38717,1731785351335/d11ab77873cb%2C38717%2C1731785351335.1731785351707 2024-11-16T19:29:11,714 DEBUG [RS:0;d11ab77873cb:38717 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45053:45053),(127.0.0.1/127.0.0.1:39465:39465)] 2024-11-16T19:29:11,906 DEBUG [d11ab77873cb:40933 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T19:29:11,906 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d11ab77873cb,38717,1731785351335 2024-11-16T19:29:11,908 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d11ab77873cb,38717,1731785351335, state=OPENING 2024-11-16T19:29:11,909 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T19:29:11,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38717-0x1004a02bcd90001, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:29:11,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:29:11,911 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T19:29:11,911 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:29:11,911 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=d11ab77873cb,38717,1731785351335}] 2024-11-16T19:29:11,911 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:29:12,066 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T19:29:12,070 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58897, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T19:29:12,075 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T19:29:12,075 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T19:29:12,077 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d11ab77873cb%2C38717%2C1731785351335.meta, suffix=.meta, logDir=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/WALs/d11ab77873cb,38717,1731785351335, archiveDir=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/oldWALs, maxLogs=32 2024-11-16T19:29:12,077 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C38717%2C1731785351335.meta.1731785352077.meta 2024-11-16T19:29:12,084 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/WALs/d11ab77873cb,38717,1731785351335/d11ab77873cb%2C38717%2C1731785351335.meta.1731785352077.meta 2024-11-16T19:29:12,086 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39465:39465),(127.0.0.1/127.0.0.1:45053:45053)] 2024-11-16T19:29:12,087 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T19:29:12,087 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T19:29:12,087 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T19:29:12,087 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
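The AbstractFSWAL(613) entries above report rollsize as exactly half of blocksize, with maxLogs=32. A sketch of that sizing, assuming the usual keys hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier and hbase.regionserver.maxlogs (the key names are assumptions; the numbers are the ones in the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch of WAL sizing as printed by AbstractFSWAL(613); key names are assumptions.
    public class WalSizingSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize",
                    256L * 1024 * 1024);                                   // 256 MB in the log
            float rollMultiplier =
                    conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
            long rollSize = (long) (blockSize * rollMultiplier);           // 128 MB in the log
            System.out.printf("blocksize=%d rollsize=%d maxLogs=%d%n",
                    blockSize, rollSize, maxLogs);
        }
    }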
2024-11-16T19:29:12,087 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T19:29:12,087 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:29:12,087 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T19:29:12,087 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T19:29:12,089 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T19:29:12,090 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T19:29:12,090 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:29:12,090 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:29:12,090 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T19:29:12,091 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T19:29:12,091 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:29:12,092 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:29:12,092 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T19:29:12,092 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T19:29:12,092 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:29:12,093 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:29:12,093 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T19:29:12,093 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T19:29:12,093 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:29:12,094 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
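Each store opener above prints the same CompactionConfiguration summary for the meta column families (minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2, off-peak ratio 5.0). A minimal sketch of tuning those values follows; the hbase.hstore.compaction.* property names are an assumption here, since the log only shows the resulting summary, not the keys that produced it.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed keys mirroring the summary printed by CompactionConfiguration(183).
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    System.out.println("compaction ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
  }
}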
2024-11-16T19:29:12,094 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T19:29:12,094 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/hbase/meta/1588230740 2024-11-16T19:29:12,095 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/hbase/meta/1588230740 2024-11-16T19:29:12,096 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T19:29:12,096 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T19:29:12,097 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T19:29:12,098 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T19:29:12,099 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=747497, jitterRate=-0.04950912296772003}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T19:29:12,099 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T19:29:12,099 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731785352087Writing region info on filesystem at 1731785352087Initializing all the Stores at 1731785352088 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785352088Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785352089 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785352089Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785352089Cleaning up temporary data from old regions at 1731785352096 (+7 ms)Running coprocessor post-open hooks at 1731785352099 (+3 ms)Region opened successfully at 1731785352099 2024-11-16T19:29:12,100 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731785352065 2024-11-16T19:29:12,103 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T19:29:12,103 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T19:29:12,104 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=d11ab77873cb,38717,1731785351335 2024-11-16T19:29:12,104 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d11ab77873cb,38717,1731785351335, state=OPEN 2024-11-16T19:29:12,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38717-0x1004a02bcd90001, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T19:29:12,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T19:29:12,106 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d11ab77873cb,38717,1731785351335 2024-11-16T19:29:12,107 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:29:12,107 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:29:12,109 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T19:29:12,109 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d11ab77873cb,38717,1731785351335 in 195 msec 2024-11-16T19:29:12,112 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T19:29:12,112 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 607 msec 2024-11-16T19:29:12,112 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T19:29:12,112 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T19:29:12,114 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T19:29:12,114 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d11ab77873cb,38717,1731785351335, seqNum=-1] 2024-11-16T19:29:12,114 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T19:29:12,116 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59427, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T19:29:12,121 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 656 msec 2024-11-16T19:29:12,121 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731785352121, completionTime=-1 2024-11-16T19:29:12,121 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T19:29:12,121 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T19:29:12,123 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T19:29:12,123 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731785412123 2024-11-16T19:29:12,123 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731785472123 2024-11-16T19:29:12,123 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-16T19:29:12,123 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,40933,1731785351288-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:29:12,123 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,40933,1731785351288-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:29:12,123 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,40933,1731785351288-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:29:12,123 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d11ab77873cb:40933, period=300000, unit=MILLISECONDS is enabled. 
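Around this point PEWorker-2 fetches the hbase:meta location from the connection registry and gets back region=hbase:meta,,1.1588230740 on d11ab77873cb,38717. From application code the same lookup can be done with a RegionLocator; the sketch below is a generic client-side example under assumed connection settings (the quorum address is a placeholder, not taken from this log).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "localhost"); // placeholder quorum
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Ask for the location of the first (and only) hbase:meta region.
      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      System.out.println("hbase:meta is on " + loc.getServerName());
    }
  }
}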
2024-11-16T19:29:12,123 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T19:29:12,123 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T19:29:12,125 DEBUG [master/d11ab77873cb:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T19:29:12,126 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.765sec 2024-11-16T19:29:12,127 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T19:29:12,127 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T19:29:12,127 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T19:29:12,127 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T19:29:12,127 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T19:29:12,127 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,40933,1731785351288-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T19:29:12,127 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,40933,1731785351288-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T19:29:12,129 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T19:29:12,129 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T19:29:12,129 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,40933,1731785351288-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
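Master initialization completes here with quota support disabled on the master (MasterQuotaManager), matching the earlier RegionServerRpcQuotaManager and RegionServerSpaceQuotaManager lines on the region server. If a test needed quotas, they would have to be switched on in the configuration before the cluster starts; the sketch below assumes the standard hbase.quota.enabled switch, which is not shown anywhere in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class EnableQuotasSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key: enables RPC and space quota support; the cluster in this log runs with it off.
    conf.setBoolean("hbase.quota.enabled", true);
    System.out.println("quota enabled = " + conf.getBoolean("hbase.quota.enabled", false));
  }
}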
2024-11-16T19:29:12,148 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f17b515, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T19:29:12,148 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d11ab77873cb,40933,-1 for getting cluster id 2024-11-16T19:29:12,148 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T19:29:12,150 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '36acdc00-333b-4fd1-8171-cc88c665bb85' 2024-11-16T19:29:12,150 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T19:29:12,151 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "36acdc00-333b-4fd1-8171-cc88c665bb85" 2024-11-16T19:29:12,151 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d0956ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T19:29:12,151 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d11ab77873cb,40933,-1] 2024-11-16T19:29:12,151 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T19:29:12,151 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:29:12,152 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50024, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T19:29:12,153 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10c2896a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T19:29:12,154 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T19:29:12,154 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d11ab77873cb,38717,1731785351335, seqNum=-1] 2024-11-16T19:29:12,155 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T19:29:12,156 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35586, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T19:29:12,157 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=d11ab77873cb,40933,1731785351288 2024-11-16T19:29:12,157 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:29:12,160 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T19:29:12,160 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T19:29:12,161 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is d11ab77873cb,40933,1731785351288 2024-11-16T19:29:12,161 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5cbfb225 2024-11-16T19:29:12,161 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T19:29:12,162 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50038, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T19:29:12,162 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T19:29:12,162 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-16T19:29:12,163 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T19:29:12,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T19:29:12,165 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T19:29:12,165 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:29:12,165 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-16T19:29:12,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T19:29:12,167 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T19:29:12,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741835_1011 (size=405) 2024-11-16T19:29:12,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741835_1011 (size=405) 2024-11-16T19:29:12,175 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 27e34c95ac096e07b393aad7e081df0c, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef 2024-11-16T19:29:12,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741836_1012 (size=88) 2024-11-16T19:29:12,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741836_1012 (size=88) 2024-11-16T19:29:12,183 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:29:12,183 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 27e34c95ac096e07b393aad7e081df0c, disabling compactions & flushes 2024-11-16T19:29:12,183 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c. 2024-11-16T19:29:12,183 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c. 2024-11-16T19:29:12,183 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c. after waiting 0 ms 2024-11-16T19:29:12,183 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c. 
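The create-table request above carries MAX_FILESIZE=786432 and MEMSTORE_FLUSHSIZE=8192, which TableDescriptorChecker flags as unusually small (the test shrinks them deliberately to force splits and flushes). A client-side equivalent of that request might look like the sketch below; it uses the public Admin and TableDescriptorBuilder API rather than the internal CreateTableProcedure shown in the log, and the connection setup is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)
              .build())
          // Deliberately tiny limits, mirroring the values the checker warns about above.
          .setMaxFileSize(786432L)
          .setMemStoreFlushSize(8192L)
          .build();
      admin.createTable(desc);
    }
  }
}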
2024-11-16T19:29:12,183 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c. 2024-11-16T19:29:12,183 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 27e34c95ac096e07b393aad7e081df0c: Waiting for close lock at 1731785352183Disabling compacts and flushes for region at 1731785352183Disabling writes for close at 1731785352183Writing region close event to WAL at 1731785352183Closed at 1731785352183 2024-11-16T19:29:12,184 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T19:29:12,185 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731785352184"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731785352184"}]},"ts":"1731785352184"} 2024-11-16T19:29:12,187 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-16T19:29:12,188 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T19:29:12,188 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731785352188"}]},"ts":"1731785352188"} 2024-11-16T19:29:12,190 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-16T19:29:12,191 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=27e34c95ac096e07b393aad7e081df0c, ASSIGN}] 2024-11-16T19:29:12,192 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=27e34c95ac096e07b393aad7e081df0c, ASSIGN 2024-11-16T19:29:12,193 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=27e34c95ac096e07b393aad7e081df0c, ASSIGN; state=OFFLINE, location=d11ab77873cb,38717,1731785351335; forceNewPlan=false, retain=false 2024-11-16T19:29:12,345 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=27e34c95ac096e07b393aad7e081df0c, regionState=OPENING, regionLocation=d11ab77873cb,38717,1731785351335 2024-11-16T19:29:12,350 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=27e34c95ac096e07b393aad7e081df0c, ASSIGN because future has completed 2024-11-16T19:29:12,352 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 27e34c95ac096e07b393aad7e081df0c, server=d11ab77873cb,38717,1731785351335}] 2024-11-16T19:29:12,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:12,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:12,517 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c. 
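The repeated Close-WAL-Writer-0 warnings above come from RecoverLeaseFSUtils reflectively invoking DistributedFileSystem.isFileClosed for WALs of an earlier minicluster (note the different HDFS port 41599 and test-data directory); the call fails with "Filesystem closed" because that cluster's DFSClient has already been shut down. For reference, the underlying HDFS calls it wraps look roughly like the sketch below; the path is a command-line argument here, and this is only an illustration, not the retry and timeout logic RecoverLeaseFSUtils actually implements.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Path wal = new Path(args[0]); // hypothetical WAL path passed on the command line
    Configuration conf = new Configuration();
    FileSystem fs = wal.getFileSystem(conf);
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // Ask the NameNode to start lease recovery, then poll until the file is reported closed.
      boolean done = dfs.recoverLease(wal);
      while (!done && !dfs.isFileClosed(wal)) {
        Thread.sleep(1000L);
        done = dfs.recoverLease(wal);
      }
      System.out.println("lease recovered for " + wal);
    }
  }
}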
2024-11-16T19:29:12,517 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 27e34c95ac096e07b393aad7e081df0c, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c.', STARTKEY => '', ENDKEY => ''} 2024-11-16T19:29:12,517 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 27e34c95ac096e07b393aad7e081df0c 2024-11-16T19:29:12,517 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:29:12,517 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 27e34c95ac096e07b393aad7e081df0c 2024-11-16T19:29:12,517 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 27e34c95ac096e07b393aad7e081df0c 2024-11-16T19:29:12,519 INFO [StoreOpener-27e34c95ac096e07b393aad7e081df0c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 27e34c95ac096e07b393aad7e081df0c 2024-11-16T19:29:12,521 INFO [StoreOpener-27e34c95ac096e07b393aad7e081df0c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 27e34c95ac096e07b393aad7e081df0c columnFamilyName info 2024-11-16T19:29:12,521 DEBUG [StoreOpener-27e34c95ac096e07b393aad7e081df0c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:29:12,522 INFO [StoreOpener-27e34c95ac096e07b393aad7e081df0c-1 {}] regionserver.HStore(327): Store=27e34c95ac096e07b393aad7e081df0c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:29:12,522 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 27e34c95ac096e07b393aad7e081df0c 2024-11-16T19:29:12,523 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c 2024-11-16T19:29:12,523 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c 2024-11-16T19:29:12,524 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 27e34c95ac096e07b393aad7e081df0c 2024-11-16T19:29:12,524 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 27e34c95ac096e07b393aad7e081df0c 2024-11-16T19:29:12,526 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 27e34c95ac096e07b393aad7e081df0c 2024-11-16T19:29:12,529 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T19:29:12,529 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 27e34c95ac096e07b393aad7e081df0c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=796149, jitterRate=0.012356653809547424}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T19:29:12,529 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 27e34c95ac096e07b393aad7e081df0c 2024-11-16T19:29:12,530 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 27e34c95ac096e07b393aad7e081df0c: Running coprocessor pre-open hook at 1731785352517Writing region info on filesystem at 1731785352517Initializing all the Stores at 1731785352519 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785352519Cleaning up temporary data from old regions at 1731785352524 (+5 ms)Running coprocessor post-open hooks at 1731785352529 (+5 ms)Region opened successfully at 1731785352530 (+1 ms) 2024-11-16T19:29:12,531 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c., pid=6, masterSystemTime=1731785352506 2024-11-16T19:29:12,533 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c. 2024-11-16T19:29:12,533 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c. 2024-11-16T19:29:12,534 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=27e34c95ac096e07b393aad7e081df0c, regionState=OPEN, openSeqNum=2, regionLocation=d11ab77873cb,38717,1731785351335 2024-11-16T19:29:12,536 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 27e34c95ac096e07b393aad7e081df0c, server=d11ab77873cb,38717,1731785351335 because future has completed 2024-11-16T19:29:12,539 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T19:29:12,540 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 27e34c95ac096e07b393aad7e081df0c, server=d11ab77873cb,38717,1731785351335 in 186 msec 2024-11-16T19:29:12,542 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T19:29:12,542 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=27e34c95ac096e07b393aad7e081df0c, ASSIGN in 349 msec 2024-11-16T19:29:12,543 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T19:29:12,543 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731785352543"}]},"ts":"1731785352543"} 2024-11-16T19:29:12,545 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-16T19:29:12,546 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T19:29:12,548 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 383 msec 2024-11-16T19:29:13,472 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:13,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:13,959 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T19:29:13,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:13,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:13,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:13,962 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:13,962 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:13,963 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:13,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:13,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:13,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:13,981 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:13,981 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:13,981 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:13,984 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:13,984 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:13,985 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:13,990 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:29:14,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:14,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:15,474 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:15,479 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:16,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:16,480 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:17,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:17,480 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:17,573 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T19:29:17,574 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-16T19:29:18,454 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T19:29:18,454 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-16T19:29:18,456 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T19:29:18,456 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-16T19:29:18,456 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-16T19:29:18,456 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-16T19:29:18,457 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T19:29:18,457 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-16T19:29:18,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:18,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:19,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:19,482 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:20,479 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:20,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:21,480 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:21,484 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-16T19:29:22,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-16T19:29:22,199 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-16T19:29:22,199 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-11-16T19:29:22,202 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T19:29:22,202 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c.
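The recurring Close-WAL-Writer-0 warnings in this log come from RecoverLeaseFSUtils, which probes DistributedFileSystem.isFileClosed reflectively while retrying WAL lease recovery; the InvocationTargetException simply wraps the underlying "Filesystem closed" IOException thrown once the DFSClient behind the old WAL has already been shut down. A minimal sketch of that reflective probe pattern follows (illustrative only, not the HBase implementation; the path in main is hypothetical):

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbe {
  /**
   * Returns true if the filesystem reports the file as closed, false if it is
   * still open or the probe cannot be made (non-HDFS filesystem, or the
   * underlying client has already been shut down).
   */
  static boolean probeIsFileClosed(FileSystem fs, Path p) {
    try {
      // DistributedFileSystem exposes isFileClosed(Path); the generic FileSystem
      // API does not, hence the reflective lookup on the concrete class.
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, p);
    } catch (NoSuchMethodException e) {
      return false; // not an HDFS client, nothing to probe
    } catch (InvocationTargetException e) {
      // The real failure (e.g. java.io.IOException: Filesystem closed) is the cause.
      System.err.println("Failed invocation for " + p + ": " + e.getCause());
      return false;
    } catch (IllegalAccessException e) {
      return false;
    }
  }

  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Hypothetical path; in the log above it is the WAL file under /user/jenkins/test-data/...
    System.out.println(probeIsFileClosed(fs, new Path("/tmp/example-wal")));
  }
}
```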
2024-11-16T19:29:22,206 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c., hostname=d11ab77873cb,38717,1731785351335, seqNum=2]
2024-11-16T19:29:22,212 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T19:29:22,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T19:29:22,219 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-16T19:29:22,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-16T19:29:22,220 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-16T19:29:22,221 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-16T19:29:22,382 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38717 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-16T19:29:22,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c.
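The entries above trace a client-requested flush through the master: the RPC at 19:29:22,212 becomes a FlushTableProcedure (pid=7), which fans out a FlushRegionProcedure (pid=8) that is dispatched to the region server hosting the single region. For reference, this kind of flush can be requested from client code with the standard Admin API; a minimal sketch, assuming a reachable (mini-)cluster whose ZooKeeper quorum setting is an assumption here:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumption: a local/mini cluster is reachable; adjust the quorum as needed.
    conf.set("hbase.zookeeper.quorum", "localhost");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a memstore flush of the table; the master logs this as
      // "Client=... flush <table>" and drives it via the flush procedures.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}
```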
2024-11-16T19:29:22,383 INFO [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 27e34c95ac096e07b393aad7e081df0c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-16T19:29:22,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/.tmp/info/c55daddcfaf1406999bdd09c128d673d is 1080, key is row0001/info:/1731785362207/Put/seqid=0
2024-11-16T19:29:22,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741837_1013 (size=6033)
2024-11-16T19:29:22,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741837_1013 (size=6033)
2024-11-16T19:29:22,408 INFO [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/.tmp/info/c55daddcfaf1406999bdd09c128d673d
2024-11-16T19:29:22,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/.tmp/info/c55daddcfaf1406999bdd09c128d673d as hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/c55daddcfaf1406999bdd09c128d673d
2024-11-16T19:29:22,425 INFO [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/c55daddcfaf1406999bdd09c128d673d, entries=1, sequenceid=5, filesize=5.9 K
2024-11-16T19:29:22,426 INFO [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 27e34c95ac096e07b393aad7e081df0c in 44ms, sequenceid=5, compaction requested=false
2024-11-16T19:29:22,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 27e34c95ac096e07b393aad7e081df0c:
2024-11-16T19:29:22,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c.
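The flush above follows the usual two-step commit visible in the log: the memstore is first written to a temporary HFile under the region's .tmp directory, and only once fully written is it moved into the column-family directory (info/) and registered with the store. A small local-filesystem sketch of that write-then-rename pattern, as a generic illustration using the Hadoop FileSystem API (the paths and file names below are made up, not HBase internals):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommit {
  public static void main(String[] args) throws Exception {
    // Local filesystem stand-in for a region directory like
    // data/default/<table>/<region>/ from the log; names are illustrative.
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path region = new Path("/tmp/region-demo");
    Path tmpFile = new Path(region, ".tmp/info/flush-demo");
    Path committed = new Path(region, "info/flush-demo");

    // 1. Write the new store file under .tmp so readers never see a partial file.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.writeBytes("row0001/info: demo cell\n");
    }

    // 2. Commit by moving it into the column-family directory in a single rename.
    fs.mkdirs(committed.getParent());
    if (!fs.rename(tmpFile, committed)) {
      throw new java.io.IOException("Commit rename failed for " + tmpFile);
    }
    System.out.println("Committed " + committed + " ("
        + fs.getFileStatus(committed).getLen() + " bytes)");
  }
}
```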
2024-11-16T19:29:22,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-16T19:29:22,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-16T19:29:22,435 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-16T19:29:22,435 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 210 msec 2024-11-16T19:29:22,438 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 221 msec 2024-11-16T19:29:22,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:29:22,484 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:23,482 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-16T19:29:23,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
    [the identical InvocationTargetException / java.io.IOException: Filesystem closed stack trace shown above is logged for each of the retries below]
2024-11-16T19:29:24,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801
2024-11-16T19:29:24,486 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta
2024-11-16T19:29:25,484 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801
2024-11-16T19:29:25,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta
2024-11-16T19:29:26,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801
2024-11-16T19:29:26,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta
2024-11-16T19:29:27,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801
2024-11-16T19:29:27,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta
2024-11-16T19:29:28,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801
2024-11-16T19:29:28,490 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta
2024-11-16T19:29:29,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801
2024-11-16T19:29:29,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta
2024-11-16T19:29:30,490 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801
2024-11-16T19:29:30,492 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta
2024-11-16T19:29:31,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801
2024-11-16T19:29:31,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta
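The block of warnings above is a retry loop: the Close-WAL-Writer-0 thread re-probes HDFS roughly once per second (note the ~1 s spacing of the timestamps) for two WAL files whose DFSClient has already been shut down, so every probe fails the same way. The sketch below is a minimal, self-contained illustration of that failure pattern, not HBase's actual RecoverLeaseFSUtils code; the class and method names in it (other than the java.lang.reflect API) are made up for the example. It shows why the log reports java.lang.reflect.InvocationTargetException with "Filesystem closed" as the cause: the isFileClosed probe is invoked reflectively, so the underlying IOException surfaces wrapped in InvocationTargetException.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class IsFileClosedRetrySketch {

    // Stand-in for a filesystem whose client has been closed: every
    // isFileClosed() call fails, mirroring the "Filesystem closed" check above.
    static class ClosedFileSystem {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        ClosedFileSystem fs = new ClosedFileSystem();
        // The probe is looked up and called reflectively, as in the stack trace above,
        // so the failure arrives as an InvocationTargetException wrapping the IOException.
        Method isFileClosed = ClosedFileSystem.class.getMethod("isFileClosed", String.class);

        for (int attempt = 1; attempt <= 3; attempt++) {
            try {
                Boolean closed = (Boolean) isFileClosed.invoke(fs, "hdfs://localhost/example-wal");
                System.out.println("file closed = " + closed);
                break;
            } catch (InvocationTargetException e) {
                // getCause() is the wrapped java.io.IOException: Filesystem closed.
                System.out.println("Failed invocation (attempt " + attempt + "): " + e.getCause());
            }
            Thread.sleep(1000L); // retry pacing comparable to the ~1 s gaps in the log
        }
    }
}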
2024-11-16T19:29:32,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-16T19:29:32,290 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-16T19:29:32,295 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T19:29:32,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T19:29:32,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-16T19:29:32,298 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-16T19:29:32,300 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-16T19:29:32,300 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-16T19:29:32,454 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38717 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-16T19:29:32,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c.
2024-11-16T19:29:32,455 INFO [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 27e34c95ac096e07b393aad7e081df0c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-16T19:29:32,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/.tmp/info/14295e12561044ed9b6191ff6963a6b5 is 1080, key is row0002/info:/1731785372293/Put/seqid=0
2024-11-16T19:29:32,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741838_1014 (size=6033)
2024-11-16T19:29:32,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741838_1014 (size=6033)
2024-11-16T19:29:32,471 INFO [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/.tmp/info/14295e12561044ed9b6191ff6963a6b5
2024-11-16T19:29:32,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/.tmp/info/14295e12561044ed9b6191ff6963a6b5 as hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/14295e12561044ed9b6191ff6963a6b5
2024-11-16T19:29:32,484 INFO [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/14295e12561044ed9b6191ff6963a6b5, entries=1, sequenceid=9, filesize=5.9 K
2024-11-16T19:29:32,486 INFO [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 27e34c95ac096e07b393aad7e081df0c in 30ms, sequenceid=9, compaction requested=false
2024-11-16T19:29:32,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 27e34c95ac096e07b393aad7e081df0c:
2024-11-16T19:29:32,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c.
2024-11-16T19:29:32,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-11-16T19:29:32,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-11-16T19:29:32,490 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-11-16T19:29:32,490 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 188 msec
2024-11-16T19:29:32,492 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 195 msec
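The records above show a client-requested flush ("Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling") that the master executes as FlushTableProcedure pid=9 with a single FlushRegionProcedure subprocedure pid=10, flushing the region's memstore to a new HFile before the procedure completes. For reference, a minimal client-side sketch that issues this kind of flush through the standard HBase Admin API is shown below; the configuration and connection settings are assumptions for the example and are not taken from this test run, which uses an in-process mini-cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        // Assumed connection settings for a local cluster; adjust to your environment.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "localhost");

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Ask the master to flush every region of the table; in the log above this
            // request shows up as FlushTableProcedure (pid=9) / FlushRegionProcedure (pid=10).
            admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
        }
    }
}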
2024-11-16T19:29:32,492 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801
    [the same InvocationTargetException / java.io.IOException: Filesystem closed stack trace as above is logged for this warning and each retry below]
2024-11-16T19:29:32,494 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta
2024-11-16T19:29:33,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801
2024-11-16T19:29:33,494 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta
2024-11-16T19:29:34,494 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801
2024-11-16T19:29:34,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta
2024-11-16T19:29:35,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801
2024-11-16T19:29:35,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta
2024-11-16T19:29:36,497 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801
2024-11-16T19:29:36,497 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta
2024-11-16T19:29:37,499 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801
2024-11-16T19:29:37,499 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:38,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:29:38,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:38,502 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta after 68063ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T19:29:38,502 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 after 68075ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T19:29:39,504 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:39,504 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:40,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:29:40,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:41,272 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T19:29:41,507 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:41,507 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:42,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-16T19:29:42,410 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-16T19:29:42,415 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C38717%2C1731785351335.1731785382415 2024-11-16T19:29:42,421 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:42,421 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:42,421 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:42,421 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:42,421 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:42,422 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/WALs/d11ab77873cb,38717,1731785351335/d11ab77873cb%2C38717%2C1731785351335.1731785351707 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/WALs/d11ab77873cb,38717,1731785351335/d11ab77873cb%2C38717%2C1731785351335.1731785382415 2024-11-16T19:29:42,423 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45053:45053),(127.0.0.1/127.0.0.1:39465:39465)] 2024-11-16T19:29:42,423 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/WALs/d11ab77873cb,38717,1731785351335/d11ab77873cb%2C38717%2C1731785351335.1731785351707 is not closed yet, will try archiving it next time 2024-11-16T19:29:42,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741833_1009 (size=5546) 2024-11-16T19:29:42,424 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T19:29:42,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741833_1009 (size=5546) 2024-11-16T19:29:42,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T19:29:42,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-16T19:29:42,427 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-16T19:29:42,429 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-16T19:29:42,429 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-16T19:29:42,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:42,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:42,584 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38717 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-16T19:29:42,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c. 
2024-11-16T19:29:42,585 INFO [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 27e34c95ac096e07b393aad7e081df0c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-16T19:29:42,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/.tmp/info/eb220f80cf96423686b5011c1caf5829 is 1080, key is row0003/info:/1731785382413/Put/seqid=0
2024-11-16T19:29:42,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741840_1016 (size=6033)
2024-11-16T19:29:42,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741840_1016 (size=6033)
2024-11-16T19:29:42,602 INFO [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/.tmp/info/eb220f80cf96423686b5011c1caf5829
2024-11-16T19:29:42,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/.tmp/info/eb220f80cf96423686b5011c1caf5829 as hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/eb220f80cf96423686b5011c1caf5829
2024-11-16T19:29:42,616 INFO [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/eb220f80cf96423686b5011c1caf5829, entries=1, sequenceid=13, filesize=5.9 K
2024-11-16T19:29:42,617 INFO [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 27e34c95ac096e07b393aad7e081df0c in 32ms, sequenceid=13, compaction requested=true
2024-11-16T19:29:42,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 27e34c95ac096e07b393aad7e081df0c:
2024-11-16T19:29:42,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c.
2024-11-16T19:29:42,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-16T19:29:42,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-16T19:29:42,621 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-16T19:29:42,621 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 189 msec 2024-11-16T19:29:42,623 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 197 msec 2024-11-16T19:29:43,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:29:43,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:44,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:44,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:29:45,511 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:45,511 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:46,512 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:29:46,513 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:47,514 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:47,514 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:29:48,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:48,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:49,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:29:49,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:50,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:50,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:29:51,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:51,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:52,149 INFO [master/d11ab77873cb:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T19:29:52,149 INFO [master/d11ab77873cb:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-16T19:29:52,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-16T19:29:52,458 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-16T19:29:52,459 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T19:29:52,460 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T19:29:52,460 DEBUG [Time-limited test {}] regionserver.HStore(1541): 27e34c95ac096e07b393aad7e081df0c/info is initiating minor compaction (all files) 2024-11-16T19:29:52,460 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T19:29:52,460 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:29:52,461 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 27e34c95ac096e07b393aad7e081df0c/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c. 
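The selection lines just above record that the exploring policy looked at 1 permutation over the 3 eligible store files (18099 bytes total, roughly 6 KB each) and chose all of them for a minor compaction. Purely as an illustrative sketch of ratio-based candidate selection, and not the actual ExploringCompactionPolicy code, the following standalone Java example picks the largest contiguous run of files in which no single file dwarfs the rest of its candidate set; the class, method, and parameter names are invented for the example.

public class RatioSelectionSketch {
    // Returns {startInclusive, endExclusive} of the largest qualifying run, or null if none.
    static int[] select(long[] sizes, int minFiles, int maxFiles, double ratio) {
        int[] best = null;
        for (int start = 0; start < sizes.length; start++) {
            int limit = Math.min(sizes.length, start + maxFiles);
            for (int end = start + minFiles; end <= limit; end++) {
                long total = 0;
                for (int i = start; i < end; i++) {
                    total += sizes[i];
                }
                boolean withinRatio = true;
                for (int i = start; i < end; i++) {
                    // A file may not be larger than `ratio` times the rest of its candidate set.
                    if (sizes[i] > ratio * (total - sizes[i])) {
                        withinRatio = false;
                        break;
                    }
                }
                if (withinRatio && (best == null || end - start > best[1] - best[0])) {
                    best = new int[] { start, end };
                }
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // Three ~6 KB store files, mirroring the "3 files of size 18099" selection in the log.
        long[] sizes = { 6033, 6033, 6033 };
        int[] chosen = select(sizes, 2, 10, 1.2);
        System.out.println(chosen == null
            ? "no eligible candidate set"
            : "compact files " + chosen[0] + " through " + (chosen[1] - 1));
    }
}

With three equally sized files every candidate run passes the ratio check, so the whole set is selected, which is consistent with the "3 (all) file(s)" compaction reported a few lines below.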
2024-11-16T19:29:52,461 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/c55daddcfaf1406999bdd09c128d673d, hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/14295e12561044ed9b6191ff6963a6b5, hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/eb220f80cf96423686b5011c1caf5829] into tmpdir=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/.tmp, totalSize=17.7 K 2024-11-16T19:29:52,461 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting c55daddcfaf1406999bdd09c128d673d, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731785362207 2024-11-16T19:29:52,462 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 14295e12561044ed9b6191ff6963a6b5, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731785372293 2024-11-16T19:29:52,462 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting eb220f80cf96423686b5011c1caf5829, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731785382413 2024-11-16T19:29:52,484 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 27e34c95ac096e07b393aad7e081df0c#info#compaction#44 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T19:29:52,485 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/.tmp/info/66ae8c957cae45b49689f3808ac3ca3e is 1080, key is row0001/info:/1731785362207/Put/seqid=0 2024-11-16T19:29:52,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741841_1017 (size=8296) 2024-11-16T19:29:52,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741841_1017 (size=8296) 2024-11-16T19:29:52,510 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/.tmp/info/66ae8c957cae45b49689f3808ac3ca3e as hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/66ae8c957cae45b49689f3808ac3ca3e 2024-11-16T19:29:52,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:52,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:52,522 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 27e34c95ac096e07b393aad7e081df0c/info of 27e34c95ac096e07b393aad7e081df0c into 66ae8c957cae45b49689f3808ac3ca3e(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
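Both the compaction commit above and the flush commit a few lines below write their output under a .tmp directory first and only then move it into the store's info directory ("Committing ... /.tmp/info/... as ... /info/..."). That is the usual write-then-rename commit pattern on a Hadoop filesystem: readers never observe a half-written store file, because the rename makes the finished file visible in a single metadata operation. The sketch below only illustrates that pattern with the public Hadoop FileSystem API; the paths and class name are made up and this is not the HBase HRegionFileSystem code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Invented example paths; a real store layout is managed by HBase itself.
        Path tmp = new Path("/data/default/ExampleTable/region/.tmp/info/newfile");
        Path dst = new Path("/data/default/ExampleTable/region/info/newfile");

        // 1. Write the new file under a temporary directory, invisible to readers.
        try (FSDataOutputStream out = fs.create(tmp)) {
            out.writeBytes("placeholder for compacted or flushed contents");
        }

        // 2. Commit by renaming into the live store directory; on HDFS this is a
        //    single namenode operation, so readers only ever see a complete file.
        fs.mkdirs(dst.getParent());
        if (!fs.rename(tmp, dst)) {
            throw new java.io.IOException("commit failed: " + tmp + " -> " + dst);
        }
    }
}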
2024-11-16T19:29:52,522 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 27e34c95ac096e07b393aad7e081df0c: 2024-11-16T19:29:52,527 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C38717%2C1731785351335.1731785392527 2024-11-16T19:29:52,549 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:52,549 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:52,549 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:52,549 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:52,549 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:29:52,550 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/WALs/d11ab77873cb,38717,1731785351335/d11ab77873cb%2C38717%2C1731785351335.1731785382415 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/WALs/d11ab77873cb,38717,1731785351335/d11ab77873cb%2C38717%2C1731785351335.1731785392527 2024-11-16T19:29:52,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741839_1015 (size=2520) 2024-11-16T19:29:52,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741839_1015 (size=2520) 2024-11-16T19:29:52,561 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/WALs/d11ab77873cb,38717,1731785351335/d11ab77873cb%2C38717%2C1731785351335.1731785351707 to hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/oldWALs/d11ab77873cb%2C38717%2C1731785351335.1731785351707 2024-11-16T19:29:52,561 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39465:39465),(127.0.0.1/127.0.0.1:45053:45053)] 2024-11-16T19:29:52,562 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T19:29:52,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T19:29:52,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-16T19:29:52,565 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-16T19:29:52,566 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-16T19:29:52,566 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-16T19:29:52,721 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38717 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-16T19:29:52,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c. 2024-11-16T19:29:52,721 INFO [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 27e34c95ac096e07b393aad7e081df0c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-16T19:29:52,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/.tmp/info/dcd5fc0d22514c08863a54c1fe9887b4 is 1080, key is row0000/info:/1731785392524/Put/seqid=0 2024-11-16T19:29:52,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741843_1019 (size=6033) 2024-11-16T19:29:52,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741843_1019 (size=6033) 2024-11-16T19:29:52,734 INFO [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/.tmp/info/dcd5fc0d22514c08863a54c1fe9887b4 2024-11-16T19:29:52,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/.tmp/info/dcd5fc0d22514c08863a54c1fe9887b4 as hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/dcd5fc0d22514c08863a54c1fe9887b4 2024-11-16T19:29:52,748 INFO [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/dcd5fc0d22514c08863a54c1fe9887b4, entries=1, sequenceid=18, filesize=5.9 K 2024-11-16T19:29:52,749 INFO [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 27e34c95ac096e07b393aad7e081df0c in 28ms, sequenceid=18, compaction requested=false 2024-11-16T19:29:52,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-0 
{event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 27e34c95ac096e07b393aad7e081df0c: 2024-11-16T19:29:52,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c. 2024-11-16T19:29:52,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-16T19:29:52,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-16T19:29:52,755 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-16T19:29:52,755 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 186 msec 2024-11-16T19:29:52,758 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 194 msec 2024-11-16T19:29:53,519 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:53,519 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:54,520 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:54,520 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:55,521 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:55,521 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:56,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:56,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:57,517 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 27e34c95ac096e07b393aad7e081df0c, had cached 0 bytes from a total of 14329 2024-11-16T19:29:57,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:57,526 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:58,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:58,527 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:59,526 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:29:59,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:00,527 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:00,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:01,527 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:01,529 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:02,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:02,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:30:02,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40933 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-16T19:30:02,578 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-16T19:30:02,580 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C38717%2C1731785351335.1731785402580 2024-11-16T19:30:02,586 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:30:02,586 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:30:02,586 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:30:02,586 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:30:02,586 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:30:02,586 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/WALs/d11ab77873cb,38717,1731785351335/d11ab77873cb%2C38717%2C1731785351335.1731785392527 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/WALs/d11ab77873cb,38717,1731785351335/d11ab77873cb%2C38717%2C1731785351335.1731785402580 2024-11-16T19:30:02,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741842_1018 (size=2026) 2024-11-16T19:30:02,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741842_1018 (size=2026) 2024-11-16T19:30:02,588 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/WALs/d11ab77873cb,38717,1731785351335/d11ab77873cb%2C38717%2C1731785351335.1731785382415 to hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/oldWALs/d11ab77873cb%2C38717%2C1731785351335.1731785382415 2024-11-16T19:30:02,592 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45053:45053),(127.0.0.1/127.0.0.1:39465:39465)] 2024-11-16T19:30:02,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T19:30:02,592 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T19:30:02,593 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T19:30:02,593 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:30:02,593 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:30:02,593 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-16T19:30:02,593 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T19:30:02,593 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1432178218, stopped=false 2024-11-16T19:30:02,593 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d11ab77873cb,40933,1731785351288 2024-11-16T19:30:02,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38717-0x1004a02bcd90001, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T19:30:02,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T19:30:02,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:30:02,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38717-0x1004a02bcd90001, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:30:02,594 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T19:30:02,594 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T19:30:02,595 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T19:30:02,595 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:30:02,595 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd11ab77873cb,38717,1731785351335' ***** 2024-11-16T19:30:02,595 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T19:30:02,595 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:30:02,595 INFO [RS:0;d11ab77873cb:38717 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T19:30:02,595 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T19:30:02,595 INFO [RS:0;d11ab77873cb:38717 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T19:30:02,595 INFO [RS:0;d11ab77873cb:38717 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T19:30:02,595 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38717-0x1004a02bcd90001, quorum=127.0.0.1:51672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:30:02,595 INFO [RS:0;d11ab77873cb:38717 {}] regionserver.HRegionServer(3091): Received CLOSE for 27e34c95ac096e07b393aad7e081df0c 2024-11-16T19:30:02,601 INFO [RS:0;d11ab77873cb:38717 {}] regionserver.HRegionServer(959): stopping server d11ab77873cb,38717,1731785351335 2024-11-16T19:30:02,601 INFO [RS:0;d11ab77873cb:38717 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T19:30:02,601 INFO [RS:0;d11ab77873cb:38717 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d11ab77873cb:38717. 
2024-11-16T19:30:02,601 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 27e34c95ac096e07b393aad7e081df0c, disabling compactions & flushes 2024-11-16T19:30:02,601 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c. 2024-11-16T19:30:02,601 DEBUG [RS:0;d11ab77873cb:38717 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T19:30:02,601 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c. 2024-11-16T19:30:02,601 DEBUG [RS:0;d11ab77873cb:38717 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:30:02,601 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c. after waiting 0 ms 2024-11-16T19:30:02,601 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c. 2024-11-16T19:30:02,601 INFO [RS:0;d11ab77873cb:38717 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T19:30:02,601 INFO [RS:0;d11ab77873cb:38717 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T19:30:02,601 INFO [RS:0;d11ab77873cb:38717 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-16T19:30:02,601 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 27e34c95ac096e07b393aad7e081df0c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-16T19:30:02,601 INFO [RS:0;d11ab77873cb:38717 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T19:30:02,602 INFO [RS:0;d11ab77873cb:38717 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T19:30:02,602 DEBUG [RS:0;d11ab77873cb:38717 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 27e34c95ac096e07b393aad7e081df0c=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c.} 2024-11-16T19:30:02,602 DEBUG [RS:0;d11ab77873cb:38717 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 27e34c95ac096e07b393aad7e081df0c 2024-11-16T19:30:02,602 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T19:30:02,602 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T19:30:02,602 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T19:30:02,602 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T19:30:02,602 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T19:30:02,602 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-16T19:30:02,606 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/.tmp/info/80734f787b114da2be1c90cf57248dbd is 1080, key is row0001/info:/1731785402579/Put/seqid=0 2024-11-16T19:30:02,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741845_1021 (size=6033) 2024-11-16T19:30:02,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741845_1021 (size=6033) 2024-11-16T19:30:02,612 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/.tmp/info/80734f787b114da2be1c90cf57248dbd 2024-11-16T19:30:02,619 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/.tmp/info/80734f787b114da2be1c90cf57248dbd as hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/80734f787b114da2be1c90cf57248dbd 2024-11-16T19:30:02,622 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/hbase/meta/1588230740/.tmp/info/13f9c95d26b14b40a94fc481db4e8688 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c./info:regioninfo/1731785352534/Put/seqid=0 2024-11-16T19:30:02,625 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/80734f787b114da2be1c90cf57248dbd, entries=1, sequenceid=22, filesize=5.9 K 2024-11-16T19:30:02,626 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 27e34c95ac096e07b393aad7e081df0c in 25ms, sequenceid=22, compaction requested=true 2024-11-16T19:30:02,627 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/c55daddcfaf1406999bdd09c128d673d, hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/14295e12561044ed9b6191ff6963a6b5, hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/eb220f80cf96423686b5011c1caf5829] to archive 2024-11-16T19:30:02,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741846_1022 (size=7308) 2024-11-16T19:30:02,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741846_1022 (size=7308) 2024-11-16T19:30:02,628 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-16T19:30:02,628 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/hbase/meta/1588230740/.tmp/info/13f9c95d26b14b40a94fc481db4e8688 2024-11-16T19:30:02,630 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/c55daddcfaf1406999bdd09c128d673d to hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/c55daddcfaf1406999bdd09c128d673d 2024-11-16T19:30:02,631 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/14295e12561044ed9b6191ff6963a6b5 to hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/14295e12561044ed9b6191ff6963a6b5 2024-11-16T19:30:02,632 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/eb220f80cf96423686b5011c1caf5829 to hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/info/eb220f80cf96423686b5011c1caf5829 2024-11-16T19:30:02,632 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=d11ab77873cb:40933 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-16T19:30:02,633 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [c55daddcfaf1406999bdd09c128d673d=6033, 14295e12561044ed9b6191ff6963a6b5=6033, eb220f80cf96423686b5011c1caf5829=6033] 2024-11-16T19:30:02,636 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/27e34c95ac096e07b393aad7e081df0c/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-16T19:30:02,637 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c. 2024-11-16T19:30:02,637 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 27e34c95ac096e07b393aad7e081df0c: Waiting for close lock at 1731785402601Running coprocessor pre-close hooks at 1731785402601Disabling compacts and flushes for region at 1731785402601Disabling writes for close at 1731785402601Obtaining lock to block concurrent updates at 1731785402601Preparing flush snapshotting stores in 27e34c95ac096e07b393aad7e081df0c at 1731785402601Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731785402602 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c. at 1731785402602Flushing 27e34c95ac096e07b393aad7e081df0c/info: creating writer at 1731785402602Flushing 27e34c95ac096e07b393aad7e081df0c/info: appending metadata at 1731785402605 (+3 ms)Flushing 27e34c95ac096e07b393aad7e081df0c/info: closing flushed file at 1731785402605Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7419a97c: reopening flushed file at 1731785402618 (+13 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 27e34c95ac096e07b393aad7e081df0c in 25ms, sequenceid=22, compaction requested=true at 1731785402627 (+9 ms)Writing region close event to WAL at 1731785402633 (+6 ms)Running coprocessor post-close hooks at 1731785402637 (+4 ms)Closed at 1731785402637 2024-11-16T19:30:02,637 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731785352162.27e34c95ac096e07b393aad7e081df0c. 
2024-11-16T19:30:02,646 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/hbase/meta/1588230740/.tmp/ns/fe67082dd86a49dca1432a88a6fac940 is 43, key is default/ns:d/1731785352116/Put/seqid=0 2024-11-16T19:30:02,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741847_1023 (size=5153) 2024-11-16T19:30:02,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741847_1023 (size=5153) 2024-11-16T19:30:02,651 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/hbase/meta/1588230740/.tmp/ns/fe67082dd86a49dca1432a88a6fac940 2024-11-16T19:30:02,668 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/hbase/meta/1588230740/.tmp/table/41d0adcffff74e35b71f8a340e04c23c is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731785352543/Put/seqid=0 2024-11-16T19:30:02,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741848_1024 (size=5508) 2024-11-16T19:30:02,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741848_1024 (size=5508) 2024-11-16T19:30:02,673 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/hbase/meta/1588230740/.tmp/table/41d0adcffff74e35b71f8a340e04c23c 2024-11-16T19:30:02,678 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/hbase/meta/1588230740/.tmp/info/13f9c95d26b14b40a94fc481db4e8688 as hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/hbase/meta/1588230740/info/13f9c95d26b14b40a94fc481db4e8688 2024-11-16T19:30:02,684 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/hbase/meta/1588230740/info/13f9c95d26b14b40a94fc481db4e8688, entries=10, sequenceid=11, filesize=7.1 K 2024-11-16T19:30:02,685 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/hbase/meta/1588230740/.tmp/ns/fe67082dd86a49dca1432a88a6fac940 as hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/hbase/meta/1588230740/ns/fe67082dd86a49dca1432a88a6fac940 2024-11-16T19:30:02,690 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/hbase/meta/1588230740/ns/fe67082dd86a49dca1432a88a6fac940, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T19:30:02,691 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/hbase/meta/1588230740/.tmp/table/41d0adcffff74e35b71f8a340e04c23c as hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/hbase/meta/1588230740/table/41d0adcffff74e35b71f8a340e04c23c 2024-11-16T19:30:02,696 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/hbase/meta/1588230740/table/41d0adcffff74e35b71f8a340e04c23c, entries=2, sequenceid=11, filesize=5.4 K 2024-11-16T19:30:02,697 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 95ms, sequenceid=11, compaction requested=false 2024-11-16T19:30:02,701 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T19:30:02,702 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T19:30:02,702 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T19:30:02,702 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731785402602Running coprocessor pre-close hooks at 1731785402602Disabling compacts and flushes for region at 1731785402602Disabling writes for close at 1731785402602Obtaining lock to block concurrent updates at 1731785402602Preparing flush snapshotting stores in 1588230740 at 1731785402602Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731785402603 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731785402603Flushing 1588230740/info: creating writer at 1731785402603Flushing 1588230740/info: appending metadata at 1731785402622 (+19 ms)Flushing 1588230740/info: closing flushed file at 1731785402622Flushing 1588230740/ns: creating writer at 1731785402633 (+11 ms)Flushing 1588230740/ns: appending metadata at 1731785402646 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1731785402646Flushing 1588230740/table: creating writer at 1731785402656 (+10 ms)Flushing 1588230740/table: appending metadata at 1731785402668 (+12 ms)Flushing 1588230740/table: closing flushed file at 1731785402668Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c1b40e2: reopening flushed file at 1731785402678 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5fb1dc24: reopening flushed file at 1731785402684 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5b2c13e5: reopening flushed file at 1731785402690 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 95ms, sequenceid=11, compaction requested=false at 1731785402697 (+7 ms)Writing region close event to WAL at 1731785402698 (+1 ms)Running coprocessor post-close hooks at 1731785402702 (+4 ms)Closed at 1731785402702 2024-11-16T19:30:02,702 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T19:30:02,802 INFO [RS:0;d11ab77873cb:38717 {}] regionserver.HRegionServer(976): stopping server d11ab77873cb,38717,1731785351335; all regions closed. 2024-11-16T19:30:02,803 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:30:02,803 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:30:02,803 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:30:02,803 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:30:02,803 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:30:02,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741834_1010 (size=3306) 2024-11-16T19:30:02,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741834_1010 (size=3306) 2024-11-16T19:30:02,807 DEBUG [RS:0;d11ab77873cb:38717 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/oldWALs 2024-11-16T19:30:02,807 INFO [RS:0;d11ab77873cb:38717 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d11ab77873cb%2C38717%2C1731785351335.meta:.meta(num 1731785352077) 2024-11-16T19:30:02,808 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:30:02,808 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:30:02,808 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:30:02,808 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:30:02,808 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:30:02,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741844_1020 (size=1252) 2024-11-16T19:30:02,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741844_1020 (size=1252) 2024-11-16T19:30:02,813 DEBUG [RS:0;d11ab77873cb:38717 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/oldWALs 2024-11-16T19:30:02,813 INFO [RS:0;d11ab77873cb:38717 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d11ab77873cb%2C38717%2C1731785351335:(num 1731785402580) 2024-11-16T19:30:02,813 DEBUG [RS:0;d11ab77873cb:38717 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:30:02,813 INFO [RS:0;d11ab77873cb:38717 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T19:30:02,813 INFO [RS:0;d11ab77873cb:38717 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T19:30:02,813 INFO [RS:0;d11ab77873cb:38717 {}] hbase.ChoreService(370): Chore service for: regionserver/d11ab77873cb:0 had 
[ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-16T19:30:02,813 INFO [RS:0;d11ab77873cb:38717 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T19:30:02,813 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T19:30:02,814 INFO [RS:0;d11ab77873cb:38717 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38717 2024-11-16T19:30:02,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T19:30:02,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38717-0x1004a02bcd90001, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d11ab77873cb,38717,1731785351335 2024-11-16T19:30:02,815 INFO [RS:0;d11ab77873cb:38717 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T19:30:02,815 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d11ab77873cb,38717,1731785351335] 2024-11-16T19:30:02,816 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d11ab77873cb,38717,1731785351335 already deleted, retry=false 2024-11-16T19:30:02,816 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d11ab77873cb,38717,1731785351335 expired; onlineServers=0 2024-11-16T19:30:02,816 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd11ab77873cb,40933,1731785351288' ***** 2024-11-16T19:30:02,816 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T19:30:02,816 INFO [M:0;d11ab77873cb:40933 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T19:30:02,816 INFO [M:0;d11ab77873cb:40933 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T19:30:02,816 DEBUG [M:0;d11ab77873cb:40933 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T19:30:02,817 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T19:30:02,817 DEBUG [M:0;d11ab77873cb:40933 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T19:30:02,817 DEBUG [master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.large.0-1731785351469 {}] cleaner.HFileCleaner(306): Exit Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.large.0-1731785351469,5,FailOnTimeoutGroup] 2024-11-16T19:30:02,817 DEBUG [master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.small.0-1731785351469 {}] cleaner.HFileCleaner(306): Exit Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.small.0-1731785351469,5,FailOnTimeoutGroup] 2024-11-16T19:30:02,817 INFO [M:0;d11ab77873cb:40933 {}] hbase.ChoreService(370): Chore service for: master/d11ab77873cb:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T19:30:02,817 INFO [M:0;d11ab77873cb:40933 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T19:30:02,817 DEBUG [M:0;d11ab77873cb:40933 {}] master.HMaster(1795): Stopping service threads 2024-11-16T19:30:02,817 INFO [M:0;d11ab77873cb:40933 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T19:30:02,817 INFO [M:0;d11ab77873cb:40933 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T19:30:02,817 INFO [M:0;d11ab77873cb:40933 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T19:30:02,817 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T19:30:02,818 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T19:30:02,818 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:30:02,818 DEBUG [M:0;d11ab77873cb:40933 {}] zookeeper.ZKUtil(347): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T19:30:02,818 WARN [M:0;d11ab77873cb:40933 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T19:30:02,818 INFO [M:0;d11ab77873cb:40933 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/.lastflushedseqids 2024-11-16T19:30:02,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741849_1025 (size=130) 2024-11-16T19:30:02,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741849_1025 (size=130) 2024-11-16T19:30:02,823 INFO [M:0;d11ab77873cb:40933 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T19:30:02,824 INFO [M:0;d11ab77873cb:40933 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T19:30:02,824 DEBUG [M:0;d11ab77873cb:40933 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T19:30:02,824 INFO [M:0;d11ab77873cb:40933 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:30:02,824 DEBUG [M:0;d11ab77873cb:40933 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:30:02,824 DEBUG [M:0;d11ab77873cb:40933 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T19:30:02,824 DEBUG [M:0;d11ab77873cb:40933 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:30:02,824 INFO [M:0;d11ab77873cb:40933 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.55 KB heapSize=54.91 KB 2024-11-16T19:30:02,837 DEBUG [M:0;d11ab77873cb:40933 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/80b0c293eb33439fb720b5bf1dd6cce4 is 82, key is hbase:meta,,1/info:regioninfo/1731785352103/Put/seqid=0 2024-11-16T19:30:02,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741850_1026 (size=5672) 2024-11-16T19:30:02,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741850_1026 (size=5672) 2024-11-16T19:30:02,842 INFO [M:0;d11ab77873cb:40933 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/80b0c293eb33439fb720b5bf1dd6cce4 2024-11-16T19:30:02,861 DEBUG [M:0;d11ab77873cb:40933 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9ecc7cc382664022b039bcb7711cfa65 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731785352548/Put/seqid=0 2024-11-16T19:30:02,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741851_1027 (size=7818) 2024-11-16T19:30:02,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741851_1027 (size=7818) 2024-11-16T19:30:02,869 INFO [M:0;d11ab77873cb:40933 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.95 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9ecc7cc382664022b039bcb7711cfa65 2024-11-16T19:30:02,874 INFO [M:0;d11ab77873cb:40933 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9ecc7cc382664022b039bcb7711cfa65 2024-11-16T19:30:02,887 DEBUG [M:0;d11ab77873cb:40933 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d8415b965e434697ba1c51826f9654f8 is 69, key is d11ab77873cb,38717,1731785351335/rs:state/1731785351564/Put/seqid=0 2024-11-16T19:30:02,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741852_1028 (size=5156) 2024-11-16T19:30:02,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741852_1028 (size=5156) 2024-11-16T19:30:02,893 INFO [M:0;d11ab77873cb:40933 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d8415b965e434697ba1c51826f9654f8 2024-11-16T19:30:02,910 DEBUG [M:0;d11ab77873cb:40933 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f7cfcf09c3984a9fba749f5ebe1f2e19 is 52, key is load_balancer_on/state:d/1731785352159/Put/seqid=0 2024-11-16T19:30:02,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741853_1029 (size=5056) 2024-11-16T19:30:02,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741853_1029 (size=5056) 2024-11-16T19:30:02,915 INFO [M:0;d11ab77873cb:40933 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f7cfcf09c3984a9fba749f5ebe1f2e19 2024-11-16T19:30:02,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38717-0x1004a02bcd90001, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:30:02,916 INFO [RS:0;d11ab77873cb:38717 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T19:30:02,916 INFO [RS:0;d11ab77873cb:38717 {}] regionserver.HRegionServer(1031): Exiting; stopping=d11ab77873cb,38717,1731785351335; zookeeper connection closed. 
2024-11-16T19:30:02,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38717-0x1004a02bcd90001, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:30:02,916 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@25d88ac2 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@25d88ac2 2024-11-16T19:30:02,917 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T19:30:02,920 DEBUG [M:0;d11ab77873cb:40933 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/80b0c293eb33439fb720b5bf1dd6cce4 as hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/80b0c293eb33439fb720b5bf1dd6cce4 2024-11-16T19:30:02,925 INFO [M:0;d11ab77873cb:40933 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/80b0c293eb33439fb720b5bf1dd6cce4, entries=8, sequenceid=121, filesize=5.5 K 2024-11-16T19:30:02,926 DEBUG [M:0;d11ab77873cb:40933 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9ecc7cc382664022b039bcb7711cfa65 as hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9ecc7cc382664022b039bcb7711cfa65 2024-11-16T19:30:02,930 INFO [M:0;d11ab77873cb:40933 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9ecc7cc382664022b039bcb7711cfa65 2024-11-16T19:30:02,930 INFO [M:0;d11ab77873cb:40933 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9ecc7cc382664022b039bcb7711cfa65, entries=14, sequenceid=121, filesize=7.6 K 2024-11-16T19:30:02,931 DEBUG [M:0;d11ab77873cb:40933 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d8415b965e434697ba1c51826f9654f8 as hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d8415b965e434697ba1c51826f9654f8 2024-11-16T19:30:02,936 INFO [M:0;d11ab77873cb:40933 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d8415b965e434697ba1c51826f9654f8, entries=1, sequenceid=121, filesize=5.0 K 2024-11-16T19:30:02,936 DEBUG [M:0;d11ab77873cb:40933 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f7cfcf09c3984a9fba749f5ebe1f2e19 as 
hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f7cfcf09c3984a9fba749f5ebe1f2e19 2024-11-16T19:30:02,940 INFO [M:0;d11ab77873cb:40933 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43173/user/jenkins/test-data/d0436c52-cc63-d92b-c729-33c1ed17b6ef/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f7cfcf09c3984a9fba749f5ebe1f2e19, entries=1, sequenceid=121, filesize=4.9 K 2024-11-16T19:30:02,941 INFO [M:0;d11ab77873cb:40933 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.55 KB/44593, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 117ms, sequenceid=121, compaction requested=false 2024-11-16T19:30:02,943 INFO [M:0;d11ab77873cb:40933 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:30:02,943 DEBUG [M:0;d11ab77873cb:40933 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731785402824Disabling compacts and flushes for region at 1731785402824Disabling writes for close at 1731785402824Obtaining lock to block concurrent updates at 1731785402824Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731785402824Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44593, getHeapSize=56168, getOffHeapSize=0, getCellsCount=140 at 1731785402824Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731785402825 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731785402825Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731785402837 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731785402837Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731785402847 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731785402860 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731785402860Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731785402874 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731785402887 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731785402887Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731785402897 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731785402910 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731785402910Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3fb013c1: reopening flushed file at 1731785402920 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a179610: reopening flushed file at 1731785402925 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7b28c119: reopening flushed file at 1731785402931 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@427b9f87: reopening flushed file at 1731785402936 (+5 ms)Finished flush of dataSize ~43.55 KB/44593, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 117ms, sequenceid=121, compaction requested=false at 1731785402941 (+5 ms)Writing region close event to WAL at 1731785402943 (+2 ms)Closed at 1731785402943 2024-11-16T19:30:02,943 INFO [sync.0 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:30:02,943 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:30:02,944 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:30:02,944 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:30:02,944 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:30:02,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38639 is added to blk_1073741830_1006 (size=52990) 2024-11-16T19:30:02,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39823 is added to blk_1073741830_1006 (size=52990) 2024-11-16T19:30:02,946 INFO [M:0;d11ab77873cb:40933 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T19:30:02,946 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T19:30:02,946 INFO [M:0;d11ab77873cb:40933 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40933 2024-11-16T19:30:02,946 INFO [M:0;d11ab77873cb:40933 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T19:30:03,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:30:03,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40933-0x1004a02bcd90000, quorum=127.0.0.1:51672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:30:03,050 INFO [M:0;d11ab77873cb:40933 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T19:30:03,070 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5d8d7f9b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:30:03,070 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@190ad9e7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:30:03,071 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:30:03,071 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16aeea80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:30:03,071 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b24fbcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/hadoop.log.dir/,STOPPED} 2024-11-16T19:30:03,073 WARN [BP-780948166-172.17.0.2-1731785350708 heartbeating to localhost/127.0.0.1:43173 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T19:30:03,073 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T19:30:03,073 WARN [BP-780948166-172.17.0.2-1731785350708 heartbeating to localhost/127.0.0.1:43173 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-780948166-172.17.0.2-1731785350708 (Datanode Uuid 56eb494f-fe7b-4c24-9106-5f68b9058e64) service to localhost/127.0.0.1:43173 2024-11-16T19:30:03,073 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T19:30:03,074 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T19:30:03,077 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/cluster_16e972a0-4f45-7c9c-d7b9-4b3aeba5bcef/data/data3/current/BP-780948166-172.17.0.2-1731785350708 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:30:03,081 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/cluster_16e972a0-4f45-7c9c-d7b9-4b3aeba5bcef/data/data4/current/BP-780948166-172.17.0.2-1731785350708 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:30:03,082 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2d027a3b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:30:03,083 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5b5b6aa1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:30:03,083 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:30:03,083 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa328f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:30:03,083 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b3167c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/hadoop.log.dir/,STOPPED} 2024-11-16T19:30:03,086 WARN [BP-780948166-172.17.0.2-1731785350708 heartbeating to localhost/127.0.0.1:43173 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T19:30:03,086 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T19:30:03,086 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T19:30:03,086 WARN [BP-780948166-172.17.0.2-1731785350708 heartbeating to localhost/127.0.0.1:43173 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-780948166-172.17.0.2-1731785350708 (Datanode Uuid cc89cd6c-53ac-40af-8b4c-ce6898d285e9) service to localhost/127.0.0.1:43173 2024-11-16T19:30:03,086 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/cluster_16e972a0-4f45-7c9c-d7b9-4b3aeba5bcef/data/data1/current/BP-780948166-172.17.0.2-1731785350708 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:30:03,086 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/cluster_16e972a0-4f45-7c9c-d7b9-4b3aeba5bcef/data/data2/current/BP-780948166-172.17.0.2-1731785350708 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:30:03,087 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T19:30:03,094 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@232381c8{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T19:30:03,096 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@290f188f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:30:03,096 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:30:03,096 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bbcc3bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:30:03,096 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78b6351{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/hadoop.log.dir/,STOPPED} 2024-11-16T19:30:03,102 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T19:30:03,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T19:30:03,148 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=207 (was 181) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43173 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43173 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43173 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43173 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43173 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43173 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43173 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:43173 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=83 (was 109), ProcessCount=11 (was 11), AvailableMemoryMB=1980 (was 2191) 2024-11-16T19:30:03,157 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=207, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=83, ProcessCount=11, AvailableMemoryMB=1978 2024-11-16T19:30:03,157 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T19:30:03,157 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/hadoop.log.dir so I do NOT create it in target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c 2024-11-16T19:30:03,157 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7eab71d8-134b-ce8a-d63a-2ad578f38a49/hadoop.tmp.dir so I do NOT create it in target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c 2024-11-16T19:30:03,157 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/cluster_aadf2298-c241-b4a0-4c5f-d80730f5b5fd, deleteOnExit=true 2024-11-16T19:30:03,157 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T19:30:03,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/test.cache.data in system properties and HBase conf 2024-11-16T19:30:03,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T19:30:03,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/hadoop.log.dir in system properties and HBase conf 2024-11-16T19:30:03,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T19:30:03,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T19:30:03,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T19:30:03,158 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-16T19:30:03,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T19:30:03,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T19:30:03,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T19:30:03,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T19:30:03,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T19:30:03,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T19:30:03,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T19:30:03,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T19:30:03,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T19:30:03,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/nfs.dump.dir in system properties and HBase conf 2024-11-16T19:30:03,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/java.io.tmpdir in system properties and HBase conf 2024-11-16T19:30:03,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T19:30:03,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T19:30:03,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T19:30:03,180 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T19:30:03,263 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:30:03,271 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:30:03,287 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:30:03,288 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:30:03,288 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T19:30:03,290 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:30:03,290 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10ce7a76{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:30:03,291 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56433553{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:30:03,432 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@52d230c9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/java.io.tmpdir/jetty-localhost-35093-hadoop-hdfs-3_4_1-tests_jar-_-any-5581642062546214262/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T19:30:03,432 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@240fc28c{HTTP/1.1, (http/1.1)}{localhost:35093} 2024-11-16T19:30:03,432 INFO [Time-limited test {}] server.Server(415): Started @230086ms 2024-11-16T19:30:03,449 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T19:30:03,515 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:30:03,519 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:30:03,521 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:30:03,521 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:30:03,521 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T19:30:03,522 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a48d3d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:30:03,523 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@53cff5cb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:30:03,529 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:03,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:03,580 INFO [regionserver/d11ab77873cb:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T19:30:03,634 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@45890504{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/java.io.tmpdir/jetty-localhost-42965-hadoop-hdfs-3_4_1-tests_jar-_-any-10843573472040920474/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:30:03,634 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3d639fc0{HTTP/1.1, (http/1.1)}{localhost:42965} 2024-11-16T19:30:03,634 INFO [Time-limited test {}] server.Server(415): Started @230288ms 2024-11-16T19:30:03,635 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
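The two RecoverLeaseFSUtils warnings above come from a reflective probe of DistributedFileSystem#isFileClosed (note the GeneratedMethodAccessor/Method.invoke frames); here the probe fails because the underlying DFSClient is already shut down, so the InvocationTargetException wraps "Filesystem closed". The following is only a minimal sketch of that reflective-probe pattern, not HBase's actual implementation; the helper name and the fail-soft policy (treat any failure as "not confirmed closed") are assumptions.

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class IsFileClosedProbe {
      /** Returns true only if the filesystem positively reports the file as closed. */
      public static boolean probeIsFileClosed(FileSystem fs, Path path) {
        try {
          // DistributedFileSystem#isFileClosed(Path) is HDFS-specific, hence the reflective lookup.
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, path);
        } catch (NoSuchMethodException e) {
          return false; // not an HDFS filesystem, nothing to probe
        } catch (InvocationTargetException e) {
          // e.getCause() carries the real failure, e.g. "java.io.IOException: Filesystem closed"
          // when the DFSClient has already been shut down, as in the stack traces above.
          return false;
        } catch (IllegalAccessException e) {
          return false;
        }
      }
    }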
2024-11-16T19:30:03,679 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:30:03,682 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:30:03,687 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:30:03,688 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:30:03,688 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T19:30:03,688 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7305dd28{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:30:03,688 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1dab95de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:30:03,699 WARN [Thread-1946 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/cluster_aadf2298-c241-b4a0-4c5f-d80730f5b5fd/data/data2/current/BP-1326534687-172.17.0.2-1731785403184/current, will proceed with Du for space computation calculation, 2024-11-16T19:30:03,699 WARN [Thread-1945 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/cluster_aadf2298-c241-b4a0-4c5f-d80730f5b5fd/data/data1/current/BP-1326534687-172.17.0.2-1731785403184/current, will proceed with Du for space computation calculation, 2024-11-16T19:30:03,720 WARN [Thread-1924 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T19:30:03,723 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xab8a238c1fe6ec08 with lease ID 0x3701a99164fbcb5f: Processing first storage report for DS-910dd642-6321-4f76-b9df-ccc8b58bc38c from datanode DatanodeRegistration(127.0.0.1:41019, datanodeUuid=4389e0b7-99f0-46ab-8cb7-1f75eace8c24, infoPort=38963, infoSecurePort=0, ipcPort=34361, storageInfo=lv=-57;cid=testClusterID;nsid=1263281160;c=1731785403184) 2024-11-16T19:30:03,723 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xab8a238c1fe6ec08 with lease ID 0x3701a99164fbcb5f: from storage DS-910dd642-6321-4f76-b9df-ccc8b58bc38c node DatanodeRegistration(127.0.0.1:41019, datanodeUuid=4389e0b7-99f0-46ab-8cb7-1f75eace8c24, infoPort=38963, infoSecurePort=0, ipcPort=34361, storageInfo=lv=-57;cid=testClusterID;nsid=1263281160;c=1731785403184), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:30:03,723 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xab8a238c1fe6ec08 with lease ID 0x3701a99164fbcb5f: Processing first storage report for DS-7af30762-96d0-4c53-840d-ee171051bc81 from datanode DatanodeRegistration(127.0.0.1:41019, datanodeUuid=4389e0b7-99f0-46ab-8cb7-1f75eace8c24, infoPort=38963, infoSecurePort=0, ipcPort=34361, storageInfo=lv=-57;cid=testClusterID;nsid=1263281160;c=1731785403184) 2024-11-16T19:30:03,723 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xab8a238c1fe6ec08 with lease ID 0x3701a99164fbcb5f: from storage DS-7af30762-96d0-4c53-840d-ee171051bc81 node DatanodeRegistration(127.0.0.1:41019, datanodeUuid=4389e0b7-99f0-46ab-8cb7-1f75eace8c24, infoPort=38963, infoSecurePort=0, ipcPort=34361, storageInfo=lv=-57;cid=testClusterID;nsid=1263281160;c=1731785403184), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:30:03,803 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@9612b29{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/java.io.tmpdir/jetty-localhost-43829-hadoop-hdfs-3_4_1-tests_jar-_-any-10234522258866124053/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:30:03,803 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@314e7370{HTTP/1.1, (http/1.1)}{localhost:43829} 2024-11-16T19:30:03,803 INFO [Time-limited test {}] server.Server(415): Started @230457ms 2024-11-16T19:30:03,804 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
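The cluster being brought up above corresponds to the logged StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}. A hedged sketch of requesting the same thing from a test follows; the class and option names are taken from the log (HBaseTestingUtil, StartMiniClusterOption), but the exact builder and method signatures in this 3.0.0-beta-2-SNAPSHOT build are assumptions.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)        // numMasters=1 in the logged option
            .numRegionServers(1)  // numRegionServers=1
            .numDataNodes(2)      // numDataNodes=2
            .build();
        util.startMiniCluster(option); // brings up DFS, ZooKeeper and HBase, as logged above
        try {
          // ... exercise the cluster here, e.g. through the testing util's connection ...
        } finally {
          util.shutdownMiniCluster(); // tears everything down again
        }
      }
    }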
2024-11-16T19:30:03,856 WARN [Thread-1972 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/cluster_aadf2298-c241-b4a0-4c5f-d80730f5b5fd/data/data4/current/BP-1326534687-172.17.0.2-1731785403184/current, will proceed with Du for space computation calculation, 2024-11-16T19:30:03,856 WARN [Thread-1971 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/cluster_aadf2298-c241-b4a0-4c5f-d80730f5b5fd/data/data3/current/BP-1326534687-172.17.0.2-1731785403184/current, will proceed with Du for space computation calculation, 2024-11-16T19:30:03,872 WARN [Thread-1960 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T19:30:03,874 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc70b72c6716d0f69 with lease ID 0x3701a99164fbcb60: Processing first storage report for DS-5027725f-6fb4-4022-9cc4-0b9a90e582be from datanode DatanodeRegistration(127.0.0.1:37181, datanodeUuid=8ed6f7a9-36bf-46a3-b402-ddf46d21a5b5, infoPort=33277, infoSecurePort=0, ipcPort=46457, storageInfo=lv=-57;cid=testClusterID;nsid=1263281160;c=1731785403184) 2024-11-16T19:30:03,875 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc70b72c6716d0f69 with lease ID 0x3701a99164fbcb60: from storage DS-5027725f-6fb4-4022-9cc4-0b9a90e582be node DatanodeRegistration(127.0.0.1:37181, datanodeUuid=8ed6f7a9-36bf-46a3-b402-ddf46d21a5b5, infoPort=33277, infoSecurePort=0, ipcPort=46457, storageInfo=lv=-57;cid=testClusterID;nsid=1263281160;c=1731785403184), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:30:03,875 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc70b72c6716d0f69 with lease ID 0x3701a99164fbcb60: Processing first storage report for DS-601caa31-aaa7-44f3-b7a8-27db022f01eb from datanode DatanodeRegistration(127.0.0.1:37181, datanodeUuid=8ed6f7a9-36bf-46a3-b402-ddf46d21a5b5, infoPort=33277, infoSecurePort=0, ipcPort=46457, storageInfo=lv=-57;cid=testClusterID;nsid=1263281160;c=1731785403184) 2024-11-16T19:30:03,875 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc70b72c6716d0f69 with lease ID 0x3701a99164fbcb60: from storage DS-601caa31-aaa7-44f3-b7a8-27db022f01eb node DatanodeRegistration(127.0.0.1:37181, datanodeUuid=8ed6f7a9-36bf-46a3-b402-ddf46d21a5b5, infoPort=33277, infoSecurePort=0, ipcPort=46457, storageInfo=lv=-57;cid=testClusterID;nsid=1263281160;c=1731785403184), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:30:03,923 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c 2024-11-16T19:30:03,926 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/cluster_aadf2298-c241-b4a0-4c5f-d80730f5b5fd/zookeeper_0, clientPort=62282, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/cluster_aadf2298-c241-b4a0-4c5f-d80730f5b5fd/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/cluster_aadf2298-c241-b4a0-4c5f-d80730f5b5fd/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T19:30:03,926 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62282 2024-11-16T19:30:03,926 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:30:03,927 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:30:03,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741825_1001 (size=7) 2024-11-16T19:30:03,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741825_1001 (size=7) 2024-11-16T19:30:03,935 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45 with version=8 2024-11-16T19:30:03,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/hbase-staging 2024-11-16T19:30:03,938 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d11ab77873cb:0 server-side Connection retries=45 2024-11-16T19:30:03,938 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:30:03,938 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T19:30:03,938 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T19:30:03,938 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:30:03,938 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T19:30:03,938 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T19:30:03,938 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T19:30:03,939 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43337 2024-11-16T19:30:03,941 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43337 connecting to ZooKeeper ensemble=127.0.0.1:62282 2024-11-16T19:30:03,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:433370x0, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T19:30:03,945 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43337-0x1004a038a830000 connected 2024-11-16T19:30:03,954 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:30:03,956 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:30:03,958 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:30:03,958 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45, hbase.cluster.distributed=false 2024-11-16T19:30:03,960 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T19:30:03,960 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43337 2024-11-16T19:30:03,960 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43337 2024-11-16T19:30:03,961 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43337 2024-11-16T19:30:03,963 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43337 2024-11-16T19:30:03,963 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43337 2024-11-16T19:30:03,984 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d11ab77873cb:0 server-side Connection retries=45 2024-11-16T19:30:03,984 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:30:03,985 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T19:30:03,985 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T19:30:03,985 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:30:03,985 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T19:30:03,985 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T19:30:03,985 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T19:30:03,986 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41539 2024-11-16T19:30:03,988 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41539 connecting to ZooKeeper ensemble=127.0.0.1:62282 2024-11-16T19:30:03,988 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:30:03,990 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:30:03,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:415390x0, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T19:30:03,995 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:415390x0, quorum=127.0.0.1:62282, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:30:03,995 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41539-0x1004a038a830001 connected 2024-11-16T19:30:03,995 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T19:30:03,996 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T19:30:03,996 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41539-0x1004a038a830001, quorum=127.0.0.1:62282, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T19:30:03,997 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41539-0x1004a038a830001, quorum=127.0.0.1:62282, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T19:30:03,997 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41539 2024-11-16T19:30:03,998 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41539 2024-11-16T19:30:03,998 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41539 2024-11-16T19:30:03,998 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41539 2024-11-16T19:30:03,998 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41539 2024-11-16T19:30:04,008 
DEBUG [M:0;d11ab77873cb:43337 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d11ab77873cb:43337 2024-11-16T19:30:04,008 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d11ab77873cb,43337,1731785403938 2024-11-16T19:30:04,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:30:04,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41539-0x1004a038a830001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:30:04,010 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d11ab77873cb,43337,1731785403938 2024-11-16T19:30:04,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:30:04,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41539-0x1004a038a830001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T19:30:04,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41539-0x1004a038a830001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:30:04,011 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T19:30:04,011 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d11ab77873cb,43337,1731785403938 from backup master directory 2024-11-16T19:30:04,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41539-0x1004a038a830001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:30:04,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d11ab77873cb,43337,1731785403938 2024-11-16T19:30:04,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:30:04,012 WARN [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-16T19:30:04,012 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d11ab77873cb,43337,1731785403938 2024-11-16T19:30:04,016 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/hbase.id] with ID: 9db3c856-6da3-4d98-ba3e-9344a821a6a2 2024-11-16T19:30:04,016 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/.tmp/hbase.id 2024-11-16T19:30:04,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741826_1002 (size=42) 2024-11-16T19:30:04,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741826_1002 (size=42) 2024-11-16T19:30:04,022 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/.tmp/hbase.id]:[hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/hbase.id] 2024-11-16T19:30:04,031 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:30:04,031 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T19:30:04,032 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
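The FSUtils lines above show the usual "write to a temporary path, then rename into place" idiom for hbase.id, so a half-written cluster ID file is never visible at its final location. Below is a minimal sketch of that idiom using the plain Hadoop FileSystem API; the helper name and the overwrite behaviour are assumptions, not the actual FSUtils code.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ClusterIdFileSketch {
      public static void writeIdFile(Configuration conf, Path target, String clusterId)
          throws IOException {
        FileSystem fs = target.getFileSystem(conf);
        Path tmp = new Path(target.getParent(), ".tmp/" + target.getName());
        // 1. Write the contents to a temporary location first ...
        try (FSDataOutputStream out = fs.create(tmp, true /* overwrite */)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // 2. ... then rename into place, so readers never observe a partially written file.
        if (!fs.rename(tmp, target)) {
          throw new IOException("Failed to rename " + tmp + " to " + target);
        }
      }
    }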
2024-11-16T19:30:04,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:30:04,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41539-0x1004a038a830001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:30:04,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741827_1003 (size=196) 2024-11-16T19:30:04,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741827_1003 (size=196) 2024-11-16T19:30:04,039 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T19:30:04,039 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T19:30:04,040 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T19:30:04,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741828_1004 (size=1189) 2024-11-16T19:30:04,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741828_1004 (size=1189) 2024-11-16T19:30:04,049 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store 2024-11-16T19:30:04,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741829_1005 (size=34) 2024-11-16T19:30:04,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741829_1005 (size=34) 2024-11-16T19:30:04,055 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:30:04,055 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T19:30:04,055 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:30:04,055 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:30:04,056 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T19:30:04,056 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:30:04,056 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
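The master:store descriptor logged above spells out per-family settings such as DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true' and an 8 KB block size for 'info'. A hedged sketch of expressing one such family with the public descriptor builders follows; the table name is illustrative only, since master:store is an internal region HBase creates for itself.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class StoreLayoutSketch {
      public static TableDescriptor infoFamilyLayout() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_store")) // illustrative name
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .build())
            .build();
      }
    }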
2024-11-16T19:30:04,056 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731785404055Disabling compacts and flushes for region at 1731785404055Disabling writes for close at 1731785404056 (+1 ms)Writing region close event to WAL at 1731785404056Closed at 1731785404056 2024-11-16T19:30:04,056 WARN [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/.initializing 2024-11-16T19:30:04,056 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/WALs/d11ab77873cb,43337,1731785403938 2024-11-16T19:30:04,058 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d11ab77873cb%2C43337%2C1731785403938, suffix=, logDir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/WALs/d11ab77873cb,43337,1731785403938, archiveDir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/oldWALs, maxLogs=10 2024-11-16T19:30:04,059 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C43337%2C1731785403938.1731785404059 2024-11-16T19:30:04,063 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/WALs/d11ab77873cb,43337,1731785403938/d11ab77873cb%2C43337%2C1731785403938.1731785404059 2024-11-16T19:30:04,063 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33277:33277),(127.0.0.1/127.0.0.1:38963:38963)] 2024-11-16T19:30:04,064 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T19:30:04,064 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:30:04,064 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:30:04,064 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:30:04,066 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:30:04,067 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T19:30:04,067 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:30:04,067 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:30:04,067 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:30:04,068 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T19:30:04,068 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:30:04,068 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:30:04,068 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:30:04,069 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T19:30:04,069 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:30:04,069 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:30:04,070 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:30:04,070 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T19:30:04,070 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:30:04,071 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:30:04,071 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:30:04,072 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:30:04,072 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:30:04,073 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:30:04,073 DEBUG [master/d11ab77873cb:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:30:04,073 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T19:30:04,074 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:30:04,076 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T19:30:04,076 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=691436, jitterRate=-0.12079453468322754}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T19:30:04,077 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731785404065Initializing all the Stores at 1731785404065Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785404065Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785404065Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785404065Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785404065Cleaning up temporary data from old regions at 1731785404073 (+8 ms)Region opened successfully at 1731785404077 (+4 ms) 2024-11-16T19:30:04,077 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T19:30:04,080 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f03f7d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d11ab77873cb/172.17.0.2:0 2024-11-16T19:30:04,081 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T19:30:04,081 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T19:30:04,081 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T19:30:04,081 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T19:30:04,082 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T19:30:04,082 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T19:30:04,082 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T19:30:04,084 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T19:30:04,085 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T19:30:04,086 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T19:30:04,086 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T19:30:04,087 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T19:30:04,087 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T19:30:04,088 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T19:30:04,088 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T19:30:04,089 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T19:30:04,089 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T19:30:04,090 DEBUG 
[master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T19:30:04,091 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T19:30:04,092 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T19:30:04,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41539-0x1004a038a830001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T19:30:04,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41539-0x1004a038a830001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:30:04,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T19:30:04,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:30:04,094 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d11ab77873cb,43337,1731785403938, sessionid=0x1004a038a830000, setting cluster-up flag (Was=false) 2024-11-16T19:30:04,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:30:04,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41539-0x1004a038a830001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:30:04,097 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T19:30:04,098 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d11ab77873cb,43337,1731785403938 2024-11-16T19:30:04,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41539-0x1004a038a830001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:30:04,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:30:04,102 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T19:30:04,103 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d11ab77873cb,43337,1731785403938 2024-11-16T19:30:04,104 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T19:30:04,105 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T19:30:04,106 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T19:30:04,106 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T19:30:04,106 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d11ab77873cb,43337,1731785403938 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T19:30:04,107 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:30:04,107 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:30:04,107 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:30:04,107 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:30:04,107 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d11ab77873cb:0, corePoolSize=10, maxPoolSize=10 2024-11-16T19:30:04,107 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:30:04,107 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d11ab77873cb:0, corePoolSize=2, maxPoolSize=2 2024-11-16T19:30:04,107 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d11ab77873cb:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T19:30:04,108 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731785434108 2024-11-16T19:30:04,108 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T19:30:04,108 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T19:30:04,108 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T19:30:04,108 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T19:30:04,108 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T19:30:04,108 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T19:30:04,108 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T19:30:04,109 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T19:30:04,109 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
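[Editor's note, illustrative sketch, not part of the test output] The CompactionConfiguration entries above repeat the same store-level settings for every column family (minCompactSize 128 MB, 3-10 files per compaction, ratio 1.2, off-peak ratio 5.0, 7-day major period with 0.5 jitter). Assuming the standard hbase-site.xml keys for these values, they would be set roughly as follows; the concrete numbers simply mirror what the log prints.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: maps the CompactionConfiguration values reported in the log
// onto their usual configuration keys. Values mirror the log output.
public class CompactionConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize = 128 MB
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
    conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);          // major period (7 days)
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter
    return conf;
  }
}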
2024-11-16T19:30:04,109 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T19:30:04,109 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T19:30:04,109 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T19:30:04,109 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T19:30:04,109 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T19:30:04,109 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:30:04,109 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.large.0-1731785404109,5,FailOnTimeoutGroup] 2024-11-16T19:30:04,109 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.small.0-1731785404109,5,FailOnTimeoutGroup] 2024-11-16T19:30:04,109 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T19:30:04,109 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T19:30:04,110 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T19:30:04,110 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T19:30:04,110 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-16T19:30:04,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741831_1007 (size=1321) 2024-11-16T19:30:04,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741831_1007 (size=1321) 2024-11-16T19:30:04,117 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T19:30:04,117 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45 2024-11-16T19:30:04,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741832_1008 (size=32) 2024-11-16T19:30:04,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741832_1008 (size=32) 2024-11-16T19:30:04,125 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:30:04,126 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T19:30:04,127 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T19:30:04,127 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:30:04,128 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:30:04,128 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T19:30:04,129 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T19:30:04,129 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:30:04,129 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:30:04,129 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T19:30:04,131 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T19:30:04,131 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:30:04,131 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:30:04,131 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T19:30:04,132 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T19:30:04,133 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:30:04,133 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:30:04,133 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T19:30:04,134 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740 2024-11-16T19:30:04,134 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740 2024-11-16T19:30:04,135 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T19:30:04,135 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T19:30:04,136 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T19:30:04,137 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T19:30:04,139 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T19:30:04,139 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=776914, jitterRate=-0.012102901935577393}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T19:30:04,140 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731785404125Initializing all the Stores at 1731785404126 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785404126Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785404126Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785404126Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785404126Cleaning up temporary data from old regions at 1731785404135 (+9 ms)Region opened successfully at 1731785404140 (+5 ms) 2024-11-16T19:30:04,140 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T19:30:04,140 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T19:30:04,140 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T19:30:04,140 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T19:30:04,140 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T19:30:04,141 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T19:30:04,141 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731785404140Disabling compacts and flushes for region at 
1731785404140Disabling writes for close at 1731785404140Writing region close event to WAL at 1731785404141 (+1 ms)Closed at 1731785404141 2024-11-16T19:30:04,142 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T19:30:04,142 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T19:30:04,142 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T19:30:04,144 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T19:30:04,145 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T19:30:04,200 INFO [RS:0;d11ab77873cb:41539 {}] regionserver.HRegionServer(746): ClusterId : 9db3c856-6da3-4d98-ba3e-9344a821a6a2 2024-11-16T19:30:04,200 DEBUG [RS:0;d11ab77873cb:41539 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T19:30:04,202 DEBUG [RS:0;d11ab77873cb:41539 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T19:30:04,202 DEBUG [RS:0;d11ab77873cb:41539 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T19:30:04,204 DEBUG [RS:0;d11ab77873cb:41539 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T19:30:04,204 DEBUG [RS:0;d11ab77873cb:41539 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bf290c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d11ab77873cb/172.17.0.2:0 2024-11-16T19:30:04,214 DEBUG [RS:0;d11ab77873cb:41539 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d11ab77873cb:41539 2024-11-16T19:30:04,214 INFO [RS:0;d11ab77873cb:41539 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T19:30:04,214 INFO [RS:0;d11ab77873cb:41539 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T19:30:04,214 DEBUG [RS:0;d11ab77873cb:41539 {}] regionserver.HRegionServer(832): About to register with Master. 
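[Editor's note, illustrative sketch, not part of the test output] The FSTableDescriptors and HRegion entries above print the full hbase:meta descriptor, family by family (info, ns, rep_barrier, table). As a hedged illustration only, the table name "demo" and the single 'info' family below are assumptions, not what the test does; an equivalent family with the same attributes the log reports (VERSIONS 3, ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks) could be declared through the public client API like this.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: a descriptor whose 'info' family carries the same attributes the log
// reports for hbase:meta. Table name "demo" is hypothetical.
public class MetaLikeDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .build())
        .build();
  }
}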
2024-11-16T19:30:04,215 INFO [RS:0;d11ab77873cb:41539 {}] regionserver.HRegionServer(2659): reportForDuty to master=d11ab77873cb,43337,1731785403938 with port=41539, startcode=1731785403984 2024-11-16T19:30:04,215 DEBUG [RS:0;d11ab77873cb:41539 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T19:30:04,216 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38893, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T19:30:04,217 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43337 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d11ab77873cb,41539,1731785403984 2024-11-16T19:30:04,217 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43337 {}] master.ServerManager(517): Registering regionserver=d11ab77873cb,41539,1731785403984 2024-11-16T19:30:04,218 DEBUG [RS:0;d11ab77873cb:41539 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45 2024-11-16T19:30:04,218 DEBUG [RS:0;d11ab77873cb:41539 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45363 2024-11-16T19:30:04,218 DEBUG [RS:0;d11ab77873cb:41539 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T19:30:04,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T19:30:04,220 DEBUG [RS:0;d11ab77873cb:41539 {}] zookeeper.ZKUtil(111): regionserver:41539-0x1004a038a830001, quorum=127.0.0.1:62282, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d11ab77873cb,41539,1731785403984 2024-11-16T19:30:04,220 WARN [RS:0;d11ab77873cb:41539 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T19:30:04,220 INFO [RS:0;d11ab77873cb:41539 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T19:30:04,220 DEBUG [RS:0;d11ab77873cb:41539 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/WALs/d11ab77873cb,41539,1731785403984 2024-11-16T19:30:04,220 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d11ab77873cb,41539,1731785403984] 2024-11-16T19:30:04,224 INFO [RS:0;d11ab77873cb:41539 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T19:30:04,225 INFO [RS:0;d11ab77873cb:41539 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T19:30:04,226 INFO [RS:0;d11ab77873cb:41539 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T19:30:04,226 INFO [RS:0;d11ab77873cb:41539 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-16T19:30:04,226 INFO [RS:0;d11ab77873cb:41539 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T19:30:04,227 INFO [RS:0;d11ab77873cb:41539 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T19:30:04,227 INFO [RS:0;d11ab77873cb:41539 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T19:30:04,227 DEBUG [RS:0;d11ab77873cb:41539 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:30:04,227 DEBUG [RS:0;d11ab77873cb:41539 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:30:04,227 DEBUG [RS:0;d11ab77873cb:41539 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:30:04,227 DEBUG [RS:0;d11ab77873cb:41539 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:30:04,227 DEBUG [RS:0;d11ab77873cb:41539 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:30:04,227 DEBUG [RS:0;d11ab77873cb:41539 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d11ab77873cb:0, corePoolSize=2, maxPoolSize=2 2024-11-16T19:30:04,227 DEBUG [RS:0;d11ab77873cb:41539 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:30:04,227 DEBUG [RS:0;d11ab77873cb:41539 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:30:04,227 DEBUG [RS:0;d11ab77873cb:41539 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:30:04,227 DEBUG [RS:0;d11ab77873cb:41539 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:30:04,227 DEBUG [RS:0;d11ab77873cb:41539 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:30:04,227 DEBUG [RS:0;d11ab77873cb:41539 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:30:04,227 DEBUG [RS:0;d11ab77873cb:41539 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d11ab77873cb:0, corePoolSize=3, maxPoolSize=3 2024-11-16T19:30:04,227 DEBUG [RS:0;d11ab77873cb:41539 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0, corePoolSize=3, maxPoolSize=3 2024-11-16T19:30:04,228 INFO [RS:0;d11ab77873cb:41539 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
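[Editor's note, illustrative sketch, not part of the test output] The repeated "Chore ScheduledChore ... is enabled" lines above and below are emitted when periodic tasks are handed to a ChoreService. A minimal sketch of that mechanism, using a hypothetical chore name and period rather than one of the chores in this log:

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

// Sketch only: scheduling a toy periodic task the same way the region server
// schedules CompactionChecker, MemstoreFlusherChore, etc.
public class ChoreSketch {
  static final class NoopStopper implements Stoppable {
    private volatile boolean stopped;
    @Override public void stop(String why) { stopped = true; }
    @Override public boolean isStopped() { return stopped; }
  }

  public static void main(String[] args) {
    ChoreService service = new ChoreService("demo");
    ScheduledChore chore = new ScheduledChore("DemoChore", new NoopStopper(), 1000) {
      @Override
      protected void chore() {
        // periodic work goes here
      }
    };
    service.scheduleChore(chore); // ChoreService logs the "is enabled" line on success
    service.shutdown();
  }
}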
2024-11-16T19:30:04,228 INFO [RS:0;d11ab77873cb:41539 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T19:30:04,228 INFO [RS:0;d11ab77873cb:41539 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:30:04,228 INFO [RS:0;d11ab77873cb:41539 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T19:30:04,229 INFO [RS:0;d11ab77873cb:41539 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T19:30:04,229 INFO [RS:0;d11ab77873cb:41539 {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,41539,1731785403984-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T19:30:04,241 INFO [RS:0;d11ab77873cb:41539 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T19:30:04,241 INFO [RS:0;d11ab77873cb:41539 {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,41539,1731785403984-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:30:04,241 INFO [RS:0;d11ab77873cb:41539 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:30:04,241 INFO [RS:0;d11ab77873cb:41539 {}] regionserver.Replication(171): d11ab77873cb,41539,1731785403984 started 2024-11-16T19:30:04,253 INFO [RS:0;d11ab77873cb:41539 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:30:04,253 INFO [RS:0;d11ab77873cb:41539 {}] regionserver.HRegionServer(1482): Serving as d11ab77873cb,41539,1731785403984, RpcServer on d11ab77873cb/172.17.0.2:41539, sessionid=0x1004a038a830001 2024-11-16T19:30:04,254 DEBUG [RS:0;d11ab77873cb:41539 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T19:30:04,254 DEBUG [RS:0;d11ab77873cb:41539 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d11ab77873cb,41539,1731785403984 2024-11-16T19:30:04,254 DEBUG [RS:0;d11ab77873cb:41539 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd11ab77873cb,41539,1731785403984' 2024-11-16T19:30:04,254 DEBUG [RS:0;d11ab77873cb:41539 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T19:30:04,254 DEBUG [RS:0;d11ab77873cb:41539 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T19:30:04,255 DEBUG [RS:0;d11ab77873cb:41539 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T19:30:04,255 DEBUG [RS:0;d11ab77873cb:41539 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T19:30:04,255 DEBUG [RS:0;d11ab77873cb:41539 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d11ab77873cb,41539,1731785403984 2024-11-16T19:30:04,255 DEBUG [RS:0;d11ab77873cb:41539 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd11ab77873cb,41539,1731785403984' 2024-11-16T19:30:04,255 DEBUG [RS:0;d11ab77873cb:41539 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T19:30:04,255 DEBUG 
[RS:0;d11ab77873cb:41539 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T19:30:04,255 DEBUG [RS:0;d11ab77873cb:41539 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T19:30:04,255 INFO [RS:0;d11ab77873cb:41539 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T19:30:04,255 INFO [RS:0;d11ab77873cb:41539 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T19:30:04,295 WARN [d11ab77873cb:43337 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-16T19:30:04,357 INFO [RS:0;d11ab77873cb:41539 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d11ab77873cb%2C41539%2C1731785403984, suffix=, logDir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/WALs/d11ab77873cb,41539,1731785403984, archiveDir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/oldWALs, maxLogs=32 2024-11-16T19:30:04,358 INFO [RS:0;d11ab77873cb:41539 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C41539%2C1731785403984.1731785404357 2024-11-16T19:30:04,363 INFO [RS:0;d11ab77873cb:41539 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/WALs/d11ab77873cb,41539,1731785403984/d11ab77873cb%2C41539%2C1731785403984.1731785404357 2024-11-16T19:30:04,364 DEBUG [RS:0;d11ab77873cb:41539 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33277:33277),(127.0.0.1/127.0.0.1:38963:38963)] 2024-11-16T19:30:04,529 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:04,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:30:04,545 DEBUG [d11ab77873cb:43337 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T19:30:04,546 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d11ab77873cb,41539,1731785403984 2024-11-16T19:30:04,547 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d11ab77873cb,41539,1731785403984, state=OPENING 2024-11-16T19:30:04,548 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T19:30:04,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41539-0x1004a038a830001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:30:04,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:30:04,549 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T19:30:04,549 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:30:04,549 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:30:04,549 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=d11ab77873cb,41539,1731785403984}] 2024-11-16T19:30:04,702 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T19:30:04,704 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45441, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T19:30:04,708 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T19:30:04,708 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T19:30:04,710 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d11ab77873cb%2C41539%2C1731785403984.meta, suffix=.meta, logDir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/WALs/d11ab77873cb,41539,1731785403984, archiveDir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/oldWALs, maxLogs=32 2024-11-16T19:30:04,710 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C41539%2C1731785403984.meta.1731785404710.meta 2024-11-16T19:30:04,717 INFO 
[RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/WALs/d11ab77873cb,41539,1731785403984/d11ab77873cb%2C41539%2C1731785403984.meta.1731785404710.meta 2024-11-16T19:30:04,721 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33277:33277),(127.0.0.1/127.0.0.1:38963:38963)] 2024-11-16T19:30:04,724 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T19:30:04,724 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T19:30:04,725 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T19:30:04,725 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-16T19:30:04,725 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T19:30:04,725 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:30:04,725 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T19:30:04,725 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T19:30:04,729 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T19:30:04,730 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T19:30:04,730 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:30:04,731 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:30:04,731 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T19:30:04,732 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T19:30:04,732 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:30:04,732 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:30:04,732 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T19:30:04,733 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T19:30:04,733 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:30:04,733 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:30:04,733 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T19:30:04,734 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T19:30:04,734 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:30:04,734 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:30:04,735 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T19:30:04,735 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740 2024-11-16T19:30:04,736 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740 2024-11-16T19:30:04,737 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T19:30:04,737 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T19:30:04,738 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-16T19:30:04,739 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T19:30:04,739 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=813445, jitterRate=0.03434908390045166}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T19:30:04,739 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T19:30:04,740 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731785404725Writing region info on filesystem at 1731785404725Initializing all the Stores at 1731785404726 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785404726Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785404729 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785404729Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785404729Cleaning up temporary data from old regions at 1731785404737 (+8 ms)Running coprocessor post-open hooks at 1731785404739 (+2 ms)Region opened successfully at 1731785404740 (+1 ms) 2024-11-16T19:30:04,741 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731785404702 2024-11-16T19:30:04,743 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T19:30:04,743 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T19:30:04,744 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=d11ab77873cb,41539,1731785403984 2024-11-16T19:30:04,745 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d11ab77873cb,41539,1731785403984, state=OPEN 2024-11-16T19:30:04,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T19:30:04,747 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d11ab77873cb,41539,1731785403984 2024-11-16T19:30:04,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41539-0x1004a038a830001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T19:30:04,747 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:30:04,747 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:30:04,749 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T19:30:04,749 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d11ab77873cb,41539,1731785403984 in 198 msec 2024-11-16T19:30:04,751 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T19:30:04,752 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 607 msec 2024-11-16T19:30:04,752 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T19:30:04,752 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T19:30:04,754 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T19:30:04,754 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d11ab77873cb,41539,1731785403984, seqNum=-1] 2024-11-16T19:30:04,754 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T19:30:04,755 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53723, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T19:30:04,760 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 655 msec 2024-11-16T19:30:04,761 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731785404761, completionTime=-1 2024-11-16T19:30:04,761 INFO 
[master/d11ab77873cb:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T19:30:04,761 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T19:30:04,763 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T19:30:04,763 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731785464763 2024-11-16T19:30:04,763 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731785524763 2024-11-16T19:30:04,763 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-16T19:30:04,763 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,43337,1731785403938-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:30:04,763 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,43337,1731785403938-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:30:04,763 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,43337,1731785403938-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:30:04,763 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d11ab77873cb:43337, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:30:04,763 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T19:30:04,764 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T19:30:04,765 DEBUG [master/d11ab77873cb:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T19:30:04,768 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.755sec 2024-11-16T19:30:04,768 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T19:30:04,768 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T19:30:04,768 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T19:30:04,768 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-16T19:30:04,768 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T19:30:04,768 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,43337,1731785403938-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T19:30:04,768 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,43337,1731785403938-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T19:30:04,771 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T19:30:04,771 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T19:30:04,771 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,43337,1731785403938-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:30:04,800 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@208716aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T19:30:04,800 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d11ab77873cb,43337,-1 for getting cluster id 2024-11-16T19:30:04,801 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T19:30:04,802 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9db3c856-6da3-4d98-ba3e-9344a821a6a2' 2024-11-16T19:30:04,802 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T19:30:04,802 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9db3c856-6da3-4d98-ba3e-9344a821a6a2" 2024-11-16T19:30:04,802 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c51d21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T19:30:04,802 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d11ab77873cb,43337,-1] 2024-11-16T19:30:04,802 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T19:30:04,803 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:30:04,803 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43152, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T19:30:04,804 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7181dda1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T19:30:04,804 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T19:30:04,805 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d11ab77873cb,41539,1731785403984, seqNum=-1] 2024-11-16T19:30:04,805 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T19:30:04,806 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52850, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T19:30:04,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=d11ab77873cb,43337,1731785403938 2024-11-16T19:30:04,808 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:30:04,810 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T19:30:04,810 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T19:30:04,811 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is d11ab77873cb,43337,1731785403938 2024-11-16T19:30:04,811 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2195603a 2024-11-16T19:30:04,811 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T19:30:04,812 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43168, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T19:30:04,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43337 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T19:30:04,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43337 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-16T19:30:04,813 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43337 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T19:30:04,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43337 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-16T19:30:04,816 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T19:30:04,816 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:30:04,816 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43337 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-16T19:30:04,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43337 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T19:30:04,817 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T19:30:04,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741835_1011 (size=381) 2024-11-16T19:30:04,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741835_1011 (size=381) 2024-11-16T19:30:04,825 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 44d75487f1e80d2d12c5e413fac668aa, NAME => 'TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45 2024-11-16T19:30:04,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741836_1012 (size=64) 2024-11-16T19:30:04,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741836_1012 (size=64) 2024-11-16T19:30:04,832 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:30:04,832 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 44d75487f1e80d2d12c5e413fac668aa, disabling compactions & flushes 2024-11-16T19:30:04,832 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa. 2024-11-16T19:30:04,832 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa. 2024-11-16T19:30:04,832 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa. after waiting 0 ms 2024-11-16T19:30:04,832 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa. 2024-11-16T19:30:04,832 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa. 2024-11-16T19:30:04,832 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 44d75487f1e80d2d12c5e413fac668aa: Waiting for close lock at 1731785404832Disabling compacts and flushes for region at 1731785404832Disabling writes for close at 1731785404832Writing region close event to WAL at 1731785404832Closed at 1731785404832 2024-11-16T19:30:04,834 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T19:30:04,834 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731785404834"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731785404834"}]},"ts":"1731785404834"} 2024-11-16T19:30:04,836 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-16T19:30:04,837 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T19:30:04,837 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731785404837"}]},"ts":"1731785404837"} 2024-11-16T19:30:04,838 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-16T19:30:04,839 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=44d75487f1e80d2d12c5e413fac668aa, ASSIGN}] 2024-11-16T19:30:04,840 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=44d75487f1e80d2d12c5e413fac668aa, ASSIGN 2024-11-16T19:30:04,841 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=44d75487f1e80d2d12c5e413fac668aa, ASSIGN; state=OFFLINE, location=d11ab77873cb,41539,1731785403984; forceNewPlan=false, retain=false 2024-11-16T19:30:04,991 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=44d75487f1e80d2d12c5e413fac668aa, regionState=OPENING, regionLocation=d11ab77873cb,41539,1731785403984 2024-11-16T19:30:04,994 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=44d75487f1e80d2d12c5e413fac668aa, ASSIGN because future has completed 2024-11-16T19:30:04,994 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 44d75487f1e80d2d12c5e413fac668aa, server=d11ab77873cb,41539,1731785403984}] 2024-11-16T19:30:05,151 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa. 
2024-11-16T19:30:05,151 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 44d75487f1e80d2d12c5e413fac668aa, NAME => 'TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa.', STARTKEY => '', ENDKEY => ''} 2024-11-16T19:30:05,151 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:05,151 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:30:05,151 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:05,151 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:05,153 INFO [StoreOpener-44d75487f1e80d2d12c5e413fac668aa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:05,154 INFO [StoreOpener-44d75487f1e80d2d12c5e413fac668aa-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 44d75487f1e80d2d12c5e413fac668aa columnFamilyName info 2024-11-16T19:30:05,154 DEBUG [StoreOpener-44d75487f1e80d2d12c5e413fac668aa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:30:05,155 INFO [StoreOpener-44d75487f1e80d2d12c5e413fac668aa-1 {}] regionserver.HStore(327): Store=44d75487f1e80d2d12c5e413fac668aa/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:30:05,155 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:05,156 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:05,156 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:05,157 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:05,157 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:05,158 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:05,161 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T19:30:05,161 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 44d75487f1e80d2d12c5e413fac668aa; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=710632, jitterRate=-0.09638531506061554}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T19:30:05,161 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:05,162 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 44d75487f1e80d2d12c5e413fac668aa: Running coprocessor pre-open hook at 1731785405151Writing region info on filesystem at 1731785405151Initializing all the Stores at 1731785405152 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785405152Cleaning up temporary data from old regions at 1731785405157 (+5 ms)Running coprocessor post-open hooks at 1731785405161 (+4 ms)Region opened successfully at 1731785405162 (+1 ms) 2024-11-16T19:30:05,163 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa., pid=6, masterSystemTime=1731785405147 2024-11-16T19:30:05,165 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa. 
2024-11-16T19:30:05,165 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa. 2024-11-16T19:30:05,166 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=44d75487f1e80d2d12c5e413fac668aa, regionState=OPEN, openSeqNum=2, regionLocation=d11ab77873cb,41539,1731785403984 2024-11-16T19:30:05,168 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 44d75487f1e80d2d12c5e413fac668aa, server=d11ab77873cb,41539,1731785403984 because future has completed 2024-11-16T19:30:05,171 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T19:30:05,172 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 44d75487f1e80d2d12c5e413fac668aa, server=d11ab77873cb,41539,1731785403984 in 175 msec 2024-11-16T19:30:05,174 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T19:30:05,174 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=44d75487f1e80d2d12c5e413fac668aa, ASSIGN in 333 msec 2024-11-16T19:30:05,175 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T19:30:05,176 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731785405176"}]},"ts":"1731785405176"} 2024-11-16T19:30:05,178 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-16T19:30:05,180 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T19:30:05,182 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 367 msec 2024-11-16T19:30:05,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:05,531 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:06,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:06,531 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:07,531 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:07,532 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:07,638 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:07,639 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:07,639 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:07,639 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:07,640 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:07,640 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:07,641 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:07,642 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:07,661 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:07,661 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:07,661 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:07,661 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:07,661 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:07,661 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:07,665 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:07,665 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:07,665 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:07,667 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:08,173 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T19:30:08,176 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:08,177 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:08,177 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:08,177 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:08,178 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:08,178 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:08,179 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:08,179 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:08,200 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:08,200 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:08,200 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:08,200 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:08,200 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:08,201 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:08,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:08,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:08,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:08,207 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:08,454 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-16T19:30:08,455 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-16T19:30:08,456 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T19:30:08,532 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:08,532 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:09,533 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:09,534 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:30:10,224 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T19:30:10,226 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-16T19:30:10,535 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:10,535 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:11,536 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:11,536 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:12,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:12,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:13,539 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:13,539 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:13,959 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T19:30:13,960 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:13,960 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:13,960 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:13,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:13,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:13,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:13,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:13,962 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:13,983 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:13,983 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:13,984 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:13,984 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:13,984 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:13,984 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:13,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:13,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:13,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:13,991 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:14,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:14,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:30:14,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43337 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T19:30:14,848 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-16T19:30:14,849 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-16T19:30:14,852 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-16T19:30:14,852 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa. 2024-11-16T19:30:14,855 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa., hostname=d11ab77873cb,41539,1731785403984, seqNum=2] 2024-11-16T19:30:14,878 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 44d75487f1e80d2d12c5e413fac668aa 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T19:30:14,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41539 {}] regionserver.HRegion(8855): Flush requested on 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:14,899 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/619406029c9f444bbe61f3e05bb05118 is 1080, key is row0001/info:/1731785414857/Put/seqid=0 2024-11-16T19:30:14,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741837_1013 (size=12509) 2024-11-16T19:30:14,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741837_1013 (size=12509) 2024-11-16T19:30:14,922 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/619406029c9f444bbe61f3e05bb05118 2024-11-16T19:30:14,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/619406029c9f444bbe61f3e05bb05118 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/619406029c9f444bbe61f3e05bb05118 2024-11-16T19:30:14,939 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/619406029c9f444bbe61f3e05bb05118, entries=7, sequenceid=11, filesize=12.2 K 
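The flush sequence recorded above — DefaultStoreFlusher writing the new file under the region's .tmp/info/ directory, HRegionFileSystem then "Committing" it into info/, and HStore reporting it as added — follows a write-to-temporary-location-then-rename commit pattern. The Java sketch below is only a minimal illustration of that pattern against the plain Hadoop FileSystem API, using placeholder paths and a made-up class name; it is not the HBase code that produced these log lines.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch only: stage data in a ".tmp" directory, then commit it
// into its final directory with a rename, mirroring the "Committing
// .tmp/info/<file> as info/<file>" entry in the log above. All paths and the
// class name are placeholders, not HBase APIs.
public class TmpThenCommitSketch {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path regionDir = new Path("/tmp/sketch-region");            // placeholder "region" directory
    Path staged = new Path(regionDir, ".tmp/info/flush-0001");  // temporary flush output
    Path committed = new Path(regionDir, "info/flush-0001");    // final store location

    fs.mkdirs(staged.getParent());
    try (FSDataOutputStream out = fs.create(staged)) {
      out.writeBytes("flushed cells would be written here as an HFile");
    }

    // Commit step: the rename publishes the completed file, so readers never
    // see a half-written file in the store directory.
    fs.mkdirs(committed.getParent());
    if (!fs.rename(staged, committed)) {
      throw new IOException("failed to commit " + staged + " to " + committed);
    }
  }
}

Run with a default Configuration this stages and commits the file on the local filesystem; pointing the Configuration at an hdfs:// namenode exercises the same two-step commit on HDFS, which is what keeps partially written flush output out of the readable store directory.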
2024-11-16T19:30:14,941 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for 44d75487f1e80d2d12c5e413fac668aa in 62ms, sequenceid=11, compaction requested=false 2024-11-16T19:30:14,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 44d75487f1e80d2d12c5e413fac668aa: 2024-11-16T19:30:14,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41539 {}] regionserver.HRegion(8855): Flush requested on 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:14,943 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 44d75487f1e80d2d12c5e413fac668aa 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB 2024-11-16T19:30:14,961 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/7169930b5f86478797294e063e1b75f6 is 1080, key is row0008/info:/1731785414882/Put/seqid=0 2024-11-16T19:30:14,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741838_1014 (size=25453) 2024-11-16T19:30:14,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741838_1014 (size=25453) 2024-11-16T19:30:14,984 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/7169930b5f86478797294e063e1b75f6 2024-11-16T19:30:14,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/7169930b5f86478797294e063e1b75f6 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/7169930b5f86478797294e063e1b75f6 2024-11-16T19:30:14,999 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/7169930b5f86478797294e063e1b75f6, entries=19, sequenceid=33, filesize=24.9 K 2024-11-16T19:30:15,001 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=6.30 KB/6456 for 44d75487f1e80d2d12c5e413fac668aa in 58ms, sequenceid=33, compaction requested=false 2024-11-16T19:30:15,001 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 44d75487f1e80d2d12c5e413fac668aa: 2024-11-16T19:30:15,001 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=37.1 K, sizeToCheck=16.0 K 2024-11-16T19:30:15,001 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:30:15,001 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/7169930b5f86478797294e063e1b75f6 because midkey is the same as first or last row 2024-11-16T19:30:15,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:15,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:16,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:16,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:30:16,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41539 {}] regionserver.HRegion(8855): Flush requested on 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:16,965 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 44d75487f1e80d2d12c5e413fac668aa 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T19:30:16,971 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/c52338cb092847f4a8c360ac21e8ee48 is 1080, key is row0027/info:/1731785414945/Put/seqid=0 2024-11-16T19:30:16,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741839_1015 (size=12509) 2024-11-16T19:30:16,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741839_1015 (size=12509) 2024-11-16T19:30:16,991 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/c52338cb092847f4a8c360ac21e8ee48 2024-11-16T19:30:16,998 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/c52338cb092847f4a8c360ac21e8ee48 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/c52338cb092847f4a8c360ac21e8ee48 2024-11-16T19:30:17,005 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/c52338cb092847f4a8c360ac21e8ee48, entries=7, sequenceid=43, filesize=12.2 K 2024-11-16T19:30:17,013 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 44d75487f1e80d2d12c5e413fac668aa in 48ms, sequenceid=43, compaction requested=true 2024-11-16T19:30:17,013 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 44d75487f1e80d2d12c5e413fac668aa: 2024-11-16T19:30:17,014 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=49.3 K, sizeToCheck=16.0 K 2024-11-16T19:30:17,014 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:30:17,014 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/7169930b5f86478797294e063e1b75f6 because midkey is the same as first or last row 2024-11-16T19:30:17,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41539 {}] regionserver.HRegion(8855): Flush requested on 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:17,016 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 44d75487f1e80d2d12c5e413fac668aa:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T19:30:17,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:30:17,017 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T19:30:17,017 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 44d75487f1e80d2d12c5e413fac668aa 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-16T19:30:17,018 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 50471 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T19:30:17,018 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1541): 44d75487f1e80d2d12c5e413fac668aa/info is initiating minor compaction (all files) 2024-11-16T19:30:17,018 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 44d75487f1e80d2d12c5e413fac668aa/info in TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa. 2024-11-16T19:30:17,018 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/619406029c9f444bbe61f3e05bb05118, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/7169930b5f86478797294e063e1b75f6, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/c52338cb092847f4a8c360ac21e8ee48] into tmpdir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp, totalSize=49.3 K 2024-11-16T19:30:17,019 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting 619406029c9f444bbe61f3e05bb05118, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731785414857 2024-11-16T19:30:17,019 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7169930b5f86478797294e063e1b75f6, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=33, earliestPutTs=1731785414882 2024-11-16T19:30:17,020 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting c52338cb092847f4a8c360ac21e8ee48, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731785414945 2024-11-16T19:30:17,021 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/f4c42181cdff4305bbe6a31d3caaadf7 is 1080, key is row0034/info:/1731785416967/Put/seqid=0 
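The compaction selection just logged (SortedCompactionPolicy considering 3 eligible store files, ExploringCompactionPolicy settling on 3 files of size 50471 after one permutation) comes down to a size-ratio test over the candidate files. The following is only a rough, self-contained sketch of that kind of check; the names (StoreFile, withinRatio) are hypothetical, the sizes are copied from the entries above, and this is not the actual HBase policy code.

// Illustrative sketch only: accept a window of store files for compaction when no single
// file is disproportionately larger than the rest, mirroring the "selected 3 files of
// size 50471" style decision logged above.
import java.util.List;

final class CompactionSelectionSketch {

    /** A store file reduced to its size in bytes for this sketch. */
    record StoreFile(String name, long sizeBytes) {}

    /**
     * Returns true when every file in the candidate window is no larger than
     * `ratio` times the combined size of the other files in the window.
     */
    static boolean withinRatio(List<StoreFile> window, double ratio) {
        long total = window.stream().mapToLong(StoreFile::sizeBytes).sum();
        for (StoreFile f : window) {
            if (f.sizeBytes() > (total - f.sizeBytes()) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes echo the three files reported above (12.2 K + 24.9 K + 12.2 K ~= 49.3 K).
        List<StoreFile> files = List.of(
            new StoreFile("619406029c9f444bbe61f3e05bb05118", 12_509),
            new StoreFile("7169930b5f86478797294e063e1b75f6", 25_453),
            new StoreFile("c52338cb092847f4a8c360ac21e8ee48", 12_509));
        System.out.println("compact all three? " + withinRatio(files, 1.2));
    }
}

With those three sizes no single file dwarfs the others, so the whole set is taken in one minor compaction, matching the "Completed compaction of 3 (all) file(s)" entry that follows further down.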
2024-11-16T19:30:17,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741840_1016 (size=22222) 2024-11-16T19:30:17,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741840_1016 (size=22222) 2024-11-16T19:30:17,029 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=62 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/f4c42181cdff4305bbe6a31d3caaadf7 2024-11-16T19:30:17,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/f4c42181cdff4305bbe6a31d3caaadf7 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/f4c42181cdff4305bbe6a31d3caaadf7 2024-11-16T19:30:17,038 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 44d75487f1e80d2d12c5e413fac668aa#info#compaction#58 average throughput is 16.93 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T19:30:17,039 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/64d5701e3c83430d93b07a112b57ea21 is 1080, key is row0001/info:/1731785414857/Put/seqid=0 2024-11-16T19:30:17,049 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/f4c42181cdff4305bbe6a31d3caaadf7, entries=16, sequenceid=62, filesize=21.7 K 2024-11-16T19:30:17,050 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=11.56 KB/11836 for 44d75487f1e80d2d12c5e413fac668aa in 33ms, sequenceid=62, compaction requested=false 2024-11-16T19:30:17,050 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 44d75487f1e80d2d12c5e413fac668aa: 2024-11-16T19:30:17,050 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.0 K, sizeToCheck=16.0 K 2024-11-16T19:30:17,050 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:30:17,051 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/7169930b5f86478797294e063e1b75f6 because midkey is the same as first or last row 2024-11-16T19:30:17,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41539 {}] regionserver.HRegion(8855): Flush requested on 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:17,052 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 44d75487f1e80d2d12c5e413fac668aa 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T19:30:17,067 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/c17cc152a2684ec4810525c28e17d53b is 1080, key is row0050/info:/1731785417018/Put/seqid=0 2024-11-16T19:30:17,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741841_1017 (size=40670) 2024-11-16T19:30:17,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741841_1017 (size=40670) 2024-11-16T19:30:17,078 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/64d5701e3c83430d93b07a112b57ea21 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/64d5701e3c83430d93b07a112b57ea21 2024-11-16T19:30:17,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741842_1018 (size=17894) 2024-11-16T19:30:17,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741842_1018 (size=17894) 2024-11-16T19:30:17,084 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/c17cc152a2684ec4810525c28e17d53b 2024-11-16T19:30:17,086 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 44d75487f1e80d2d12c5e413fac668aa/info of 44d75487f1e80d2d12c5e413fac668aa into 64d5701e3c83430d93b07a112b57ea21(size=39.7 K), total size for store is 61.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
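The flush path visible above always writes the new HFile under the region's .tmp directory first and only then "commits" it by renaming it into the info/ family directory ("Committing .../.tmp/info/<hfile> as .../info/<hfile>"). Below is a minimal sketch of that write-then-rename pattern against the Hadoop FileSystem API; FlushCommitSketch, flushAndCommit and the demo paths are made up for illustration, and a real flush writes an HFile rather than raw bytes.

// Minimal sketch, not HRegionFileSystem itself: flush data to a temporary file under
// .tmp and only then rename it into the store directory, so readers never see a
// half-written file in info/.
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FlushCommitSketch {

    /** Writes bytes to <store>/.tmp/<name> and commits them to <store>/info/<name>. */
    static Path flushAndCommit(FileSystem fs, Path storeDir, String name, byte[] data)
            throws IOException {
        Path tmpFile = new Path(new Path(storeDir, ".tmp"), name);
        Path finalFile = new Path(new Path(storeDir, "info"), name);

        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
            out.write(data); // in HBase this would be an HFile writer, not raw bytes
        }
        fs.mkdirs(finalFile.getParent());
        // The commit is a rename, which is what the "Committing ... as ..." entries record.
        if (!fs.rename(tmpFile, finalFile)) {
            throw new IOException("Failed to commit " + tmpFile + " as " + finalFile);
        }
        return finalFile;
    }

    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration()); // local filesystem by default
        Path store = new Path("/tmp/flush-commit-demo/store");
        System.out.println(flushAndCommit(fs, store, "example-hfile",
            "demo".getBytes(StandardCharsets.UTF_8)));
    }
}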
2024-11-16T19:30:17,086 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 44d75487f1e80d2d12c5e413fac668aa: 2024-11-16T19:30:17,086 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa., storeName=44d75487f1e80d2d12c5e413fac668aa/info, priority=13, startTime=1731785417014; duration=0sec 2024-11-16T19:30:17,086 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=61.4 K, sizeToCheck=16.0 K 2024-11-16T19:30:17,086 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:30:17,087 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/64d5701e3c83430d93b07a112b57ea21 because midkey is the same as first or last row 2024-11-16T19:30:17,087 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=61.4 K, sizeToCheck=16.0 K 2024-11-16T19:30:17,087 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:30:17,087 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/64d5701e3c83430d93b07a112b57ea21 because midkey is the same as first or last row 2024-11-16T19:30:17,087 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=61.4 K, sizeToCheck=16.0 K 2024-11-16T19:30:17,087 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:30:17,087 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/64d5701e3c83430d93b07a112b57ea21 because midkey is the same as first or last row 2024-11-16T19:30:17,087 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:30:17,087 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 44d75487f1e80d2d12c5e413fac668aa:info 2024-11-16T19:30:17,093 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/c17cc152a2684ec4810525c28e17d53b as 
hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/c17cc152a2684ec4810525c28e17d53b 2024-11-16T19:30:17,099 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/c17cc152a2684ec4810525c28e17d53b, entries=12, sequenceid=77, filesize=17.5 K 2024-11-16T19:30:17,100 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=3.15 KB/3228 for 44d75487f1e80d2d12c5e413fac668aa in 48ms, sequenceid=77, compaction requested=true 2024-11-16T19:30:17,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 44d75487f1e80d2d12c5e413fac668aa: 2024-11-16T19:30:17,100 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=78.9 K, sizeToCheck=16.0 K 2024-11-16T19:30:17,100 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:30:17,100 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/64d5701e3c83430d93b07a112b57ea21 because midkey is the same as first or last row 2024-11-16T19:30:17,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 44d75487f1e80d2d12c5e413fac668aa:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T19:30:17,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:30:17,100 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T19:30:17,102 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 80786 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T19:30:17,102 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1541): 44d75487f1e80d2d12c5e413fac668aa/info is initiating minor compaction (all files) 2024-11-16T19:30:17,102 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 44d75487f1e80d2d12c5e413fac668aa/info in TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa. 
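The repeated "Should split because region size is big enough sumSize=..., sizeToCheck=16.0 K" and "cannot split ... because midkey is the same as first or last row" pairs above amount to two checks: a size threshold, and a guard that refuses to split when the chosen midkey coincides with the file's first or last row. A simplified sketch follows, using hypothetical names (SplitCheckSketch, chooseSplitKey) rather than the real policy classes.

// Rough sketch of the two split checks logged above: size threshold plus midkey guard.
import java.util.Arrays;

final class SplitCheckSketch {

    static boolean shouldSplit(long sumStoreSizeBytes, long sizeToCheckBytes) {
        return sumStoreSizeBytes > sizeToCheckBytes;
    }

    /** Returns the split key, or null when a meaningful midpoint does not exist. */
    static byte[] chooseSplitKey(byte[] firstKey, byte[] midKey, byte[] lastKey) {
        if (Arrays.equals(midKey, firstKey) || Arrays.equals(midKey, lastKey)) {
            return null; // all data clusters around one row; splitting here is pointless
        }
        return midKey;
    }

    public static void main(String[] args) {
        long sum = 49 * 1024 + 300;   // ~49.3 K, as in the earlier entries
        long check = 16 * 1024;       // the 16.0 K sizeToCheck threshold
        System.out.println("size says split: " + shouldSplit(sum, check));
        byte[] key = chooseSplitKey("row0001".getBytes(), "row0001".getBytes(),
                                    "row0062".getBytes());
        System.out.println("split key: " + (key == null
            ? "none (midkey == first row)" : new String(key)));
    }
}

This is why the region keeps being flagged as big enough to split yet is not actually split until a usable midkey appears later in the log.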
2024-11-16T19:30:17,102 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/64d5701e3c83430d93b07a112b57ea21, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/f4c42181cdff4305bbe6a31d3caaadf7, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/c17cc152a2684ec4810525c28e17d53b] into tmpdir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp, totalSize=78.9 K 2024-11-16T19:30:17,102 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting 64d5701e3c83430d93b07a112b57ea21, keycount=33, bloomtype=ROW, size=39.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731785414857 2024-11-16T19:30:17,103 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting f4c42181cdff4305bbe6a31d3caaadf7, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=62, earliestPutTs=1731785416967 2024-11-16T19:30:17,103 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting c17cc152a2684ec4810525c28e17d53b, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1731785417018 2024-11-16T19:30:17,117 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 44d75487f1e80d2d12c5e413fac668aa#info#compaction#60 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T19:30:17,118 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/9ab94e2ab97842c081829a4c1dedd8cf is 1080, key is row0001/info:/1731785414857/Put/seqid=0 2024-11-16T19:30:17,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741843_1019 (size=71001) 2024-11-16T19:30:17,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741843_1019 (size=71001) 2024-11-16T19:30:17,134 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/9ab94e2ab97842c081829a4c1dedd8cf as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/9ab94e2ab97842c081829a4c1dedd8cf 2024-11-16T19:30:17,140 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 44d75487f1e80d2d12c5e413fac668aa/info of 44d75487f1e80d2d12c5e413fac668aa into 9ab94e2ab97842c081829a4c1dedd8cf(size=69.3 K), total size for store is 69.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T19:30:17,140 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 44d75487f1e80d2d12c5e413fac668aa: 2024-11-16T19:30:17,140 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa., storeName=44d75487f1e80d2d12c5e413fac668aa/info, priority=13, startTime=1731785417100; duration=0sec 2024-11-16T19:30:17,141 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=69.3 K, sizeToCheck=16.0 K 2024-11-16T19:30:17,141 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:30:17,141 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/9ab94e2ab97842c081829a4c1dedd8cf because midkey is the same as first or last row 2024-11-16T19:30:17,141 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=69.3 K, sizeToCheck=16.0 K 2024-11-16T19:30:17,141 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:30:17,141 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/9ab94e2ab97842c081829a4c1dedd8cf because midkey is the same as first or last row 2024-11-16T19:30:17,141 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=69.3 K, sizeToCheck=16.0 K 2024-11-16T19:30:17,141 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:30:17,141 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/9ab94e2ab97842c081829a4c1dedd8cf because midkey is the same as first or last row 2024-11-16T19:30:17,141 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:30:17,141 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 44d75487f1e80d2d12c5e413fac668aa:info 2024-11-16T19:30:17,542 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:17,542 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:18,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:18,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:19,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41539 {}] regionserver.HRegion(8855): Flush requested on 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:19,076 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 44d75487f1e80d2d12c5e413fac668aa 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T19:30:19,081 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/49757e756c104c1996eb4a9ac1e37409 is 1080, key is row0062/info:/1731785417053/Put/seqid=0 2024-11-16T19:30:19,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741844_1020 (size=12509) 2024-11-16T19:30:19,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741844_1020 (size=12509) 2024-11-16T19:30:19,087 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/49757e756c104c1996eb4a9ac1e37409 2024-11-16T19:30:19,093 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/49757e756c104c1996eb4a9ac1e37409 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/49757e756c104c1996eb4a9ac1e37409 2024-11-16T19:30:19,099 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/49757e756c104c1996eb4a9ac1e37409, entries=7, sequenceid=89, filesize=12.2 K 2024-11-16T19:30:19,100 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 44d75487f1e80d2d12c5e413fac668aa in 24ms, sequenceid=89, compaction requested=false 2024-11-16T19:30:19,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 44d75487f1e80d2d12c5e413fac668aa: 2024-11-16T19:30:19,100 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=81.6 K, sizeToCheck=16.0 K 2024-11-16T19:30:19,100 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:30:19,100 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/9ab94e2ab97842c081829a4c1dedd8cf because midkey is the same as first or last row 2024-11-16T19:30:19,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41539 {}] regionserver.HRegion(8855): Flush requested on 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:19,101 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 44d75487f1e80d2d12c5e413fac668aa 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-16T19:30:19,105 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/30b79f39b1124b7580e432fcd7bc2a88 is 1080, key is row0069/info:/1731785419077/Put/seqid=0 2024-11-16T19:30:19,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741845_1021 (size=16817) 2024-11-16T19:30:19,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741845_1021 (size=16817) 2024-11-16T19:30:19,514 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=103 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/30b79f39b1124b7580e432fcd7bc2a88 2024-11-16T19:30:19,526 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/30b79f39b1124b7580e432fcd7bc2a88 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/30b79f39b1124b7580e432fcd7bc2a88 2024-11-16T19:30:19,535 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/30b79f39b1124b7580e432fcd7bc2a88, entries=11, sequenceid=103, filesize=16.4 K 2024-11-16T19:30:19,536 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=17.86 KB/18292 for 44d75487f1e80d2d12c5e413fac668aa in 435ms, sequenceid=103, compaction requested=true 2024-11-16T19:30:19,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 44d75487f1e80d2d12c5e413fac668aa: 2024-11-16T19:30:19,536 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=98.0 K, sizeToCheck=16.0 K 2024-11-16T19:30:19,536 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:30:19,536 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/9ab94e2ab97842c081829a4c1dedd8cf because midkey is the same as first or last row 2024-11-16T19:30:19,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 44d75487f1e80d2d12c5e413fac668aa:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T19:30:19,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:30:19,536 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T19:30:19,538 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 100327 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T19:30:19,538 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1541): 44d75487f1e80d2d12c5e413fac668aa/info is initiating minor compaction (all files) 2024-11-16T19:30:19,538 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 44d75487f1e80d2d12c5e413fac668aa/info in TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa. 2024-11-16T19:30:19,538 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/9ab94e2ab97842c081829a4c1dedd8cf, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/49757e756c104c1996eb4a9ac1e37409, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/30b79f39b1124b7580e432fcd7bc2a88] into tmpdir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp, totalSize=98.0 K 2024-11-16T19:30:19,539 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9ab94e2ab97842c081829a4c1dedd8cf, keycount=61, bloomtype=ROW, size=69.3 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1731785414857 2024-11-16T19:30:19,539 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting 49757e756c104c1996eb4a9ac1e37409, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1731785417053 2024-11-16T19:30:19,540 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting 30b79f39b1124b7580e432fcd7bc2a88, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1731785419077 2024-11-16T19:30:19,544 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:19,544 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:19,555 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 44d75487f1e80d2d12c5e413fac668aa#info#compaction#63 average throughput is 27.02 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T19:30:19,556 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/1c83772e17e2450abee9792923aa63f6 is 1080, key is row0001/info:/1731785414857/Put/seqid=0 2024-11-16T19:30:19,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741846_1022 (size=90562) 2024-11-16T19:30:19,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741846_1022 (size=90562) 2024-11-16T19:30:19,565 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/1c83772e17e2450abee9792923aa63f6 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/1c83772e17e2450abee9792923aa63f6 2024-11-16T19:30:19,571 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 44d75487f1e80d2d12c5e413fac668aa/info of 44d75487f1e80d2d12c5e413fac668aa into 1c83772e17e2450abee9792923aa63f6(size=88.4 K), total size for store is 88.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
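The recurring RecoverLeaseFSUtils WARNs in this section come from a reflective probe: lease recovery asks the filesystem whether the old WAL file is closed by invoking isFileClosed(Path) through reflection, and here every attempt fails with "Filesystem closed", so the warning is logged and the check is retried about once a second. Below is a hedged, stand-alone sketch of that probe, not RecoverLeaseFSUtils itself; IsFileClosedProbeSketch and its method names are illustrative.

// Illustrative stand-in for the reflective isFileClosed probe behind the WARNs above.
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class IsFileClosedProbeSketch {

    private static final Logger LOG = LoggerFactory.getLogger(IsFileClosedProbeSketch.class);

    /**
     * Returns true only when the filesystem positively reports the file as closed.
     * Any reflective failure is logged and treated as "not closed yet", so the caller retries.
     */
    static boolean isFileClosed(FileSystem fs, Method isFileClosedMeth, Path p) {
        try {
            return (Boolean) isFileClosedMeth.invoke(fs, p);
        } catch (ReflectiveOperationException e) {
            LOG.warn("Failed invocation for {}", p, e); // matches the WARN pattern in the log
            return false;
        }
    }

    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new org.apache.hadoop.conf.Configuration());
        try {
            // Only HDFS-backed FileSystem implementations expose isFileClosed(Path),
            // which is exactly why the real code reaches it through reflection.
            Method m = fs.getClass().getMethod("isFileClosed", Path.class);
            System.out.println(isFileClosed(fs, m, new Path("/some/wal/file")));
        } catch (NoSuchMethodException e) {
            System.out.println(fs.getClass().getSimpleName() + " has no isFileClosed probe");
        }
    }
}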
2024-11-16T19:30:19,571 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 44d75487f1e80d2d12c5e413fac668aa: 2024-11-16T19:30:19,572 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa., storeName=44d75487f1e80d2d12c5e413fac668aa/info, priority=13, startTime=1731785419536; duration=0sec 2024-11-16T19:30:19,572 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=88.4 K, sizeToCheck=16.0 K 2024-11-16T19:30:19,572 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:30:19,572 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=88.4 K, sizeToCheck=16.0 K 2024-11-16T19:30:19,572 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:30:19,572 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=88.4 K, sizeToCheck=16.0 K 2024-11-16T19:30:19,572 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T19:30:19,573 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:30:19,573 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:30:19,573 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 44d75487f1e80d2d12c5e413fac668aa:info 2024-11-16T19:30:19,574 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43337 {}] assignment.AssignmentManager(1355): Split request from d11ab77873cb,41539,1731785403984, parent={ENCODED => 44d75487f1e80d2d12c5e413fac668aa, NAME => 'TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-16T19:30:19,579 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43337 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=d11ab77873cb,41539,1731785403984 2024-11-16T19:30:19,583 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43337 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=44d75487f1e80d2d12c5e413fac668aa, daughterA=f88eb02d4f0e204056c921e0c9eab377, daughterB=efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:19,584 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=44d75487f1e80d2d12c5e413fac668aa, 
daughterA=f88eb02d4f0e204056c921e0c9eab377, daughterB=efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:19,584 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=44d75487f1e80d2d12c5e413fac668aa, daughterA=f88eb02d4f0e204056c921e0c9eab377, daughterB=efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:19,585 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=44d75487f1e80d2d12c5e413fac668aa, daughterA=f88eb02d4f0e204056c921e0c9eab377, daughterB=efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:19,590 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=44d75487f1e80d2d12c5e413fac668aa, UNASSIGN}] 2024-11-16T19:30:19,591 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=44d75487f1e80d2d12c5e413fac668aa, UNASSIGN 2024-11-16T19:30:19,593 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=44d75487f1e80d2d12c5e413fac668aa, regionState=CLOSING, regionLocation=d11ab77873cb,41539,1731785403984 2024-11-16T19:30:19,595 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=44d75487f1e80d2d12c5e413fac668aa, UNASSIGN because future has completed 2024-11-16T19:30:19,596 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-16T19:30:19,596 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 44d75487f1e80d2d12c5e413fac668aa, server=d11ab77873cb,41539,1731785403984}] 2024-11-16T19:30:19,755 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:19,755 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-16T19:30:19,756 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 44d75487f1e80d2d12c5e413fac668aa, disabling compactions & flushes 2024-11-16T19:30:19,756 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa. 2024-11-16T19:30:19,756 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa. 
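Editor's note: the split traced above (SplitTableRegionProcedure with splitKey=row0062) is initiated by the region server's own split policy once the store reaches 88.4 K. As a hedged sketch of the equivalent client-side request, not what the test itself does, the same split could be asked for explicitly through the Admin API; the table name and split key are the ones in the log, everything else is assumed.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestSplit {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Same table and split key that appear in the split request above.
      admin.split(TableName.valueOf("TestLogRolling-testLogRolling"), Bytes.toBytes("row0062"));
      // The master then runs the split procedure (UNASSIGN the parent, split the store
      // files, create the daughters, ASSIGN them), exactly as the surrounding log traces.
    }
  }
}
```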
2024-11-16T19:30:19,756 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa. after waiting 0 ms 2024-11-16T19:30:19,756 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa. 2024-11-16T19:30:19,757 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 44d75487f1e80d2d12c5e413fac668aa 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-16T19:30:19,763 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/2ea02c2a2db4499e86404cc6e3654774 is 1080, key is row0080/info:/1731785419102/Put/seqid=0 2024-11-16T19:30:19,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741847_1023 (size=23299) 2024-11-16T19:30:19,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741847_1023 (size=23299) 2024-11-16T19:30:19,768 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=124 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/2ea02c2a2db4499e86404cc6e3654774 2024-11-16T19:30:19,773 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/.tmp/info/2ea02c2a2db4499e86404cc6e3654774 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/2ea02c2a2db4499e86404cc6e3654774 2024-11-16T19:30:19,778 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/2ea02c2a2db4499e86404cc6e3654774, entries=17, sequenceid=124, filesize=22.8 K 2024-11-16T19:30:19,779 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=0 B/0 for 44d75487f1e80d2d12c5e413fac668aa in 22ms, sequenceid=124, compaction requested=false 2024-11-16T19:30:19,780 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa.-1 {}] regionserver.HStore(2317): Moving the files 
[hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/619406029c9f444bbe61f3e05bb05118, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/7169930b5f86478797294e063e1b75f6, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/64d5701e3c83430d93b07a112b57ea21, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/c52338cb092847f4a8c360ac21e8ee48, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/f4c42181cdff4305bbe6a31d3caaadf7, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/9ab94e2ab97842c081829a4c1dedd8cf, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/c17cc152a2684ec4810525c28e17d53b, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/49757e756c104c1996eb4a9ac1e37409, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/30b79f39b1124b7580e432fcd7bc2a88] to archive 2024-11-16T19:30:19,781 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
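Editor's note: the HFileArchiver entries that follow move each compacted store file from the region's data directory to the matching path under the cluster's archive directory. The helper below is hypothetical (it is not HBase's HFileArchiver API); it only spells out the data-to-archive path mapping visible in those entries.

```java
import org.apache.hadoop.fs.Path;

/**
 * Hypothetical helper, for illustration only: it mirrors the layout seen in the
 * archiver log lines, where
 *   <root>/data/default/<table>/<region>/<cf>/<hfile>
 * is moved to
 *   <root>/archive/data/default/<table>/<region>/<cf>/<hfile>.
 */
public final class ArchivePathExample {
  static Path toArchivePath(Path rootDir, Path storeFile) {
    // Relative part after the cluster root, e.g. data/default/<table>/<region>/<cf>/<hfile>
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1);
    return new Path(new Path(rootDir, "archive"), relative);
  }

  public static void main(String[] args) {
    Path root = new Path("hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45");
    Path hfile = new Path(root,
        "data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/619406029c9f444bbe61f3e05bb05118");
    System.out.println(toArchivePath(root, hfile));
    // prints the archive/... path that the first archiver entry below reports as the destination
  }
}
```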
2024-11-16T19:30:19,782 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/619406029c9f444bbe61f3e05bb05118 to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/619406029c9f444bbe61f3e05bb05118 2024-11-16T19:30:19,783 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/7169930b5f86478797294e063e1b75f6 to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/7169930b5f86478797294e063e1b75f6 2024-11-16T19:30:19,784 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/64d5701e3c83430d93b07a112b57ea21 to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/64d5701e3c83430d93b07a112b57ea21 2024-11-16T19:30:19,785 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/c52338cb092847f4a8c360ac21e8ee48 to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/c52338cb092847f4a8c360ac21e8ee48 2024-11-16T19:30:19,786 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/f4c42181cdff4305bbe6a31d3caaadf7 to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/f4c42181cdff4305bbe6a31d3caaadf7 2024-11-16T19:30:19,788 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/9ab94e2ab97842c081829a4c1dedd8cf to 
hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/9ab94e2ab97842c081829a4c1dedd8cf 2024-11-16T19:30:19,789 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/c17cc152a2684ec4810525c28e17d53b to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/c17cc152a2684ec4810525c28e17d53b 2024-11-16T19:30:19,790 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/49757e756c104c1996eb4a9ac1e37409 to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/49757e756c104c1996eb4a9ac1e37409 2024-11-16T19:30:19,791 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/30b79f39b1124b7580e432fcd7bc2a88 to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/30b79f39b1124b7580e432fcd7bc2a88 2024-11-16T19:30:19,797 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/recovered.edits/127.seqid, newMaxSeqId=127, maxSeqId=1 2024-11-16T19:30:19,798 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa. 2024-11-16T19:30:19,798 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 44d75487f1e80d2d12c5e413fac668aa: Waiting for close lock at 1731785419756Running coprocessor pre-close hooks at 1731785419756Disabling compacts and flushes for region at 1731785419756Disabling writes for close at 1731785419756Obtaining lock to block concurrent updates at 1731785419757 (+1 ms)Preparing flush snapshotting stores in 44d75487f1e80d2d12c5e413fac668aa at 1731785419757Finished memstore snapshotting TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa., syncing WAL and waiting on mvcc, flushsize=dataSize=18292, getHeapSize=19824, getOffHeapSize=0, getCellsCount=17 at 1731785419757Flushing stores of TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa. 
at 1731785419759 (+2 ms)Flushing 44d75487f1e80d2d12c5e413fac668aa/info: creating writer at 1731785419759Flushing 44d75487f1e80d2d12c5e413fac668aa/info: appending metadata at 1731785419763 (+4 ms)Flushing 44d75487f1e80d2d12c5e413fac668aa/info: closing flushed file at 1731785419763Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67863514: reopening flushed file at 1731785419772 (+9 ms)Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=0 B/0 for 44d75487f1e80d2d12c5e413fac668aa in 22ms, sequenceid=124, compaction requested=false at 1731785419779 (+7 ms)Writing region close event to WAL at 1731785419794 (+15 ms)Running coprocessor post-close hooks at 1731785419798 (+4 ms)Closed at 1731785419798 2024-11-16T19:30:19,800 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:19,800 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=44d75487f1e80d2d12c5e413fac668aa, regionState=CLOSED 2024-11-16T19:30:19,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 44d75487f1e80d2d12c5e413fac668aa, server=d11ab77873cb,41539,1731785403984 because future has completed 2024-11-16T19:30:19,805 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-16T19:30:19,805 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 44d75487f1e80d2d12c5e413fac668aa, server=d11ab77873cb,41539,1731785403984 in 207 msec 2024-11-16T19:30:19,807 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-16T19:30:19,807 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=44d75487f1e80d2d12c5e413fac668aa, UNASSIGN in 215 msec 2024-11-16T19:30:19,814 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:30:19,817 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 2 storefiles, region=44d75487f1e80d2d12c5e413fac668aa, threads=2 2024-11-16T19:30:19,819 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/2ea02c2a2db4499e86404cc6e3654774 for region: 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:19,819 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/1c83772e17e2450abee9792923aa63f6 for region: 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:19,829 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for 
hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/2ea02c2a2db4499e86404cc6e3654774, top=true 2024-11-16T19:30:19,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741848_1024 (size=27) 2024-11-16T19:30:19,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741848_1024 (size=27) 2024-11-16T19:30:19,839 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/TestLogRolling-testLogRolling=44d75487f1e80d2d12c5e413fac668aa-2ea02c2a2db4499e86404cc6e3654774 for child: efca89295498edf3132003eebd0ddf83, parent: 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:19,839 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/2ea02c2a2db4499e86404cc6e3654774 for region: 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:19,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741849_1025 (size=27) 2024-11-16T19:30:19,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741849_1025 (size=27) 2024-11-16T19:30:19,849 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/1c83772e17e2450abee9792923aa63f6 for region: 44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:30:19,851 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 44d75487f1e80d2d12c5e413fac668aa Daughter A: [hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/f88eb02d4f0e204056c921e0c9eab377/info/1c83772e17e2450abee9792923aa63f6.44d75487f1e80d2d12c5e413fac668aa] storefiles, Daughter B: [hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/1c83772e17e2450abee9792923aa63f6.44d75487f1e80d2d12c5e413fac668aa, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/TestLogRolling-testLogRolling=44d75487f1e80d2d12c5e413fac668aa-2ea02c2a2db4499e86404cc6e3654774] storefiles. 
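Editor's note: the split does not rewrite data immediately. Daughter A receives a reference file named after the parent HFile plus the parent's encoded region name, and daughter B receives the same kind of reference plus an HFileLink for the file that only covers the top half. The name builders below are hypothetical, not HBase API; they just make the two naming patterns in the lines above explicit.

```java
/**
 * Hypothetical name builders, for illustration only: a daughter-region reference file
 * is "<hfile>.<parentEncodedRegion>", and an HFileLink is
 * "<table>=<parentEncodedRegion>-<hfile>", matching the split output above.
 */
public final class SplitFileNames {
  static String referenceFileName(String hfile, String parentEncodedRegion) {
    return hfile + "." + parentEncodedRegion;
  }

  static String hfileLinkName(String table, String parentEncodedRegion, String hfile) {
    return table + "=" + parentEncodedRegion + "-" + hfile;
  }

  public static void main(String[] args) {
    String parent = "44d75487f1e80d2d12c5e413fac668aa";
    // Matches .../f88eb02d4f0e204056c921e0c9eab377/info/1c83772e17e2450abee9792923aa63f6.44d75487f1e80d2d12c5e413fac668aa
    System.out.println(referenceFileName("1c83772e17e2450abee9792923aa63f6", parent));
    // Matches .../efca89295498edf3132003eebd0ddf83/info/TestLogRolling-testLogRolling=44d75487f1e80d2d12c5e413fac668aa-2ea02c2a2db4499e86404cc6e3654774
    System.out.println(hfileLinkName("TestLogRolling-testLogRolling", parent, "2ea02c2a2db4499e86404cc6e3654774"));
  }
}
```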
2024-11-16T19:30:19,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741850_1026 (size=71) 2024-11-16T19:30:19,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741850_1026 (size=71) 2024-11-16T19:30:19,860 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:30:19,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741851_1027 (size=71) 2024-11-16T19:30:19,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741851_1027 (size=71) 2024-11-16T19:30:19,874 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:30:19,884 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/f88eb02d4f0e204056c921e0c9eab377/recovered.edits/127.seqid, newMaxSeqId=127, maxSeqId=-1 2024-11-16T19:30:19,886 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/recovered.edits/127.seqid, newMaxSeqId=127, maxSeqId=-1 2024-11-16T19:30:19,889 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731785419888"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731785419888"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731785419888"}]},"ts":"1731785419888"} 2024-11-16T19:30:19,889 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731785419579.f88eb02d4f0e204056c921e0c9eab377.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731785419888"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731785419888"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731785419888"}]},"ts":"1731785419888"} 2024-11-16T19:30:19,890 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731785419888"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731785419888"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731785419888"}]},"ts":"1731785419888"} 2024-11-16T19:30:19,910 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f88eb02d4f0e204056c921e0c9eab377, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=efca89295498edf3132003eebd0ddf83, ASSIGN}] 2024-11-16T19:30:19,912 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f88eb02d4f0e204056c921e0c9eab377, ASSIGN 2024-11-16T19:30:19,912 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=efca89295498edf3132003eebd0ddf83, ASSIGN 2024-11-16T19:30:19,913 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f88eb02d4f0e204056c921e0c9eab377, ASSIGN; state=SPLITTING_NEW, location=d11ab77873cb,41539,1731785403984; forceNewPlan=false, retain=false 2024-11-16T19:30:19,913 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=efca89295498edf3132003eebd0ddf83, ASSIGN; state=SPLITTING_NEW, location=d11ab77873cb,41539,1731785403984; forceNewPlan=false, retain=false 2024-11-16T19:30:20,064 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=efca89295498edf3132003eebd0ddf83, regionState=OPENING, regionLocation=d11ab77873cb,41539,1731785403984 2024-11-16T19:30:20,064 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=f88eb02d4f0e204056c921e0c9eab377, regionState=OPENING, regionLocation=d11ab77873cb,41539,1731785403984 2024-11-16T19:30:20,066 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=efca89295498edf3132003eebd0ddf83, ASSIGN because future has completed 2024-11-16T19:30:20,067 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure efca89295498edf3132003eebd0ddf83, server=d11ab77873cb,41539,1731785403984}] 2024-11-16T19:30:20,067 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f88eb02d4f0e204056c921e0c9eab377, ASSIGN because future has completed 2024-11-16T19:30:20,068 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure f88eb02d4f0e204056c921e0c9eab377, server=d11ab77873cb,41539,1731785403984}] 2024-11-16T19:30:20,222 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731785419579.f88eb02d4f0e204056c921e0c9eab377. 
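Editor's note: once the two ASSIGN procedures above complete, the parent region is no longer online and the table's region list should show the two daughters meeting at row0062. As a hedged sketch (assuming a reachable cluster and the usual client config), this is how that could be checked from a client.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class ListDaughterRegions {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Expect two regions: one with end key row0062 (daughter A) and one starting
      // at row0062 (daughter B), as in the meta updates logged above.
      for (RegionInfo region : admin.getRegions(TableName.valueOf("TestLogRolling-testLogRolling"))) {
        System.out.println(region.getEncodedName()
            + " start=" + Bytes.toStringBinary(region.getStartKey())
            + " end=" + Bytes.toStringBinary(region.getEndKey()));
      }
    }
  }
}
```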
2024-11-16T19:30:20,222 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => f88eb02d4f0e204056c921e0c9eab377, NAME => 'TestLogRolling-testLogRolling,,1731785419579.f88eb02d4f0e204056c921e0c9eab377.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-16T19:30:20,222 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling f88eb02d4f0e204056c921e0c9eab377 2024-11-16T19:30:20,223 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731785419579.f88eb02d4f0e204056c921e0c9eab377.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:30:20,223 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for f88eb02d4f0e204056c921e0c9eab377 2024-11-16T19:30:20,223 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for f88eb02d4f0e204056c921e0c9eab377 2024-11-16T19:30:20,224 INFO [StoreOpener-f88eb02d4f0e204056c921e0c9eab377-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region f88eb02d4f0e204056c921e0c9eab377 2024-11-16T19:30:20,225 INFO [StoreOpener-f88eb02d4f0e204056c921e0c9eab377-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f88eb02d4f0e204056c921e0c9eab377 columnFamilyName info 2024-11-16T19:30:20,225 DEBUG [StoreOpener-f88eb02d4f0e204056c921e0c9eab377-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:30:20,239 DEBUG [StoreOpener-f88eb02d4f0e204056c921e0c9eab377-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/f88eb02d4f0e204056c921e0c9eab377/info/1c83772e17e2450abee9792923aa63f6.44d75487f1e80d2d12c5e413fac668aa->hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/1c83772e17e2450abee9792923aa63f6-bottom 2024-11-16T19:30:20,240 INFO [StoreOpener-f88eb02d4f0e204056c921e0c9eab377-1 {}] regionserver.HStore(327): Store=f88eb02d4f0e204056c921e0c9eab377/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:30:20,240 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for f88eb02d4f0e204056c921e0c9eab377 2024-11-16T19:30:20,241 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/f88eb02d4f0e204056c921e0c9eab377 2024-11-16T19:30:20,242 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/f88eb02d4f0e204056c921e0c9eab377 2024-11-16T19:30:20,243 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for f88eb02d4f0e204056c921e0c9eab377 2024-11-16T19:30:20,243 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for f88eb02d4f0e204056c921e0c9eab377 2024-11-16T19:30:20,245 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for f88eb02d4f0e204056c921e0c9eab377 2024-11-16T19:30:20,246 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened f88eb02d4f0e204056c921e0c9eab377; next sequenceid=128; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=836452, jitterRate=0.06360448896884918}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T19:30:20,246 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f88eb02d4f0e204056c921e0c9eab377 2024-11-16T19:30:20,246 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for f88eb02d4f0e204056c921e0c9eab377: Running coprocessor pre-open hook at 1731785420223Writing region info on filesystem at 1731785420223Initializing all the Stores at 1731785420224 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785420224Cleaning up temporary data from old regions at 1731785420243 (+19 ms)Running coprocessor post-open hooks at 1731785420246 (+3 ms)Region opened successfully at 1731785420246 2024-11-16T19:30:20,247 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731785419579.f88eb02d4f0e204056c921e0c9eab377., pid=13, masterSystemTime=1731785420219 2024-11-16T19:30:20,248 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 
f88eb02d4f0e204056c921e0c9eab377:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T19:30:20,248 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:30:20,248 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-16T19:30:20,248 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731785419579.f88eb02d4f0e204056c921e0c9eab377. 2024-11-16T19:30:20,248 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1541): f88eb02d4f0e204056c921e0c9eab377/info is initiating minor compaction (all files) 2024-11-16T19:30:20,248 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f88eb02d4f0e204056c921e0c9eab377/info in TestLogRolling-testLogRolling,,1731785419579.f88eb02d4f0e204056c921e0c9eab377. 2024-11-16T19:30:20,249 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/f88eb02d4f0e204056c921e0c9eab377/info/1c83772e17e2450abee9792923aa63f6.44d75487f1e80d2d12c5e413fac668aa->hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/1c83772e17e2450abee9792923aa63f6-bottom] into tmpdir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/f88eb02d4f0e204056c921e0c9eab377/.tmp, totalSize=88.4 K 2024-11-16T19:30:20,249 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1c83772e17e2450abee9792923aa63f6.44d75487f1e80d2d12c5e413fac668aa, keycount=39, bloomtype=ROW, size=88.4 K, encoding=NONE, compression=NONE, seqNum=103, earliestPutTs=1731785414857 2024-11-16T19:30:20,250 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731785419579.f88eb02d4f0e204056c921e0c9eab377. 2024-11-16T19:30:20,250 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731785419579.f88eb02d4f0e204056c921e0c9eab377. 2024-11-16T19:30:20,250 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83. 
2024-11-16T19:30:20,250 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => efca89295498edf3132003eebd0ddf83, NAME => 'TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-16T19:30:20,250 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:20,251 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:30:20,251 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=f88eb02d4f0e204056c921e0c9eab377, regionState=OPEN, openSeqNum=128, regionLocation=d11ab77873cb,41539,1731785403984 2024-11-16T19:30:20,251 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:20,251 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:20,252 INFO [StoreOpener-efca89295498edf3132003eebd0ddf83-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:20,253 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41539 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-16T19:30:20,253 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
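Editor's note: the flush requested on region 1588230740 below is the region server flushing hbase:meta after writing the split and assignment edits; FlushAllLargeStoresPolicy decides to flush all column families because none is individually over the threshold. For illustration only, the same flush could be requested manually through the Admin API, as in this hedged sketch.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushMeta {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // hbase:meta flushes like any other table; in the log the region server
      // triggers it itself right after the split rows are written.
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}
```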
2024-11-16T19:30:20,253 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-16T19:30:20,253 INFO [StoreOpener-efca89295498edf3132003eebd0ddf83-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region efca89295498edf3132003eebd0ddf83 columnFamilyName info 2024-11-16T19:30:20,253 DEBUG [StoreOpener-efca89295498edf3132003eebd0ddf83-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:30:20,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure f88eb02d4f0e204056c921e0c9eab377, server=d11ab77873cb,41539,1731785403984 because future has completed 2024-11-16T19:30:20,258 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-11-16T19:30:20,258 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure f88eb02d4f0e204056c921e0c9eab377, server=d11ab77873cb,41539,1731785403984 in 186 msec 2024-11-16T19:30:20,259 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f88eb02d4f0e204056c921e0c9eab377, ASSIGN in 347 msec 2024-11-16T19:30:20,271 DEBUG [StoreOpener-efca89295498edf3132003eebd0ddf83-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/1c83772e17e2450abee9792923aa63f6.44d75487f1e80d2d12c5e413fac668aa->hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/1c83772e17e2450abee9792923aa63f6-top 2024-11-16T19:30:20,273 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740/.tmp/info/49c288fa89084939975942ef09d534ab is 193, key is TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83./info:regioninfo/1731785420064/Put/seqid=0 2024-11-16T19:30:20,275 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f88eb02d4f0e204056c921e0c9eab377#info#compaction#66 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T19:30:20,275 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/f88eb02d4f0e204056c921e0c9eab377/.tmp/info/943e66b48f0544dea8fc8c77d54feef0 is 1080, key is row0001/info:/1731785414857/Put/seqid=0 2024-11-16T19:30:20,278 DEBUG [StoreOpener-efca89295498edf3132003eebd0ddf83-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/TestLogRolling-testLogRolling=44d75487f1e80d2d12c5e413fac668aa-2ea02c2a2db4499e86404cc6e3654774 2024-11-16T19:30:20,278 INFO [StoreOpener-efca89295498edf3132003eebd0ddf83-1 {}] regionserver.HStore(327): Store=efca89295498edf3132003eebd0ddf83/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:30:20,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741852_1028 (size=9847) 2024-11-16T19:30:20,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741852_1028 (size=9847) 2024-11-16T19:30:20,283 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740/.tmp/info/49c288fa89084939975942ef09d534ab 2024-11-16T19:30:20,286 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:20,286 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:20,288 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:20,288 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:20,289 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:20,290 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:20,291 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened efca89295498edf3132003eebd0ddf83; next sequenceid=128; 
SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=846467, jitterRate=0.07633902132511139}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T19:30:20,291 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:20,292 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for efca89295498edf3132003eebd0ddf83: Running coprocessor pre-open hook at 1731785420251Writing region info on filesystem at 1731785420251Initializing all the Stores at 1731785420251Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785420251Cleaning up temporary data from old regions at 1731785420289 (+38 ms)Running coprocessor post-open hooks at 1731785420291 (+2 ms)Region opened successfully at 1731785420291 2024-11-16T19:30:20,292 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83., pid=12, masterSystemTime=1731785420219 2024-11-16T19:30:20,293 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store efca89295498edf3132003eebd0ddf83:info, priority=-2147483648, current under compaction store size is 2 2024-11-16T19:30:20,293 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:30:20,293 DEBUG [RS:0;d11ab77873cb:41539-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-16T19:30:20,294 INFO [RS:0;d11ab77873cb:41539-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83. 2024-11-16T19:30:20,294 DEBUG [RS:0;d11ab77873cb:41539-longCompactions-0 {}] regionserver.HStore(1541): efca89295498edf3132003eebd0ddf83/info is initiating minor compaction (all files) 2024-11-16T19:30:20,294 INFO [RS:0;d11ab77873cb:41539-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of efca89295498edf3132003eebd0ddf83/info in TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83. 
2024-11-16T19:30:20,294 INFO [RS:0;d11ab77873cb:41539-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/1c83772e17e2450abee9792923aa63f6.44d75487f1e80d2d12c5e413fac668aa->hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/1c83772e17e2450abee9792923aa63f6-top, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/TestLogRolling-testLogRolling=44d75487f1e80d2d12c5e413fac668aa-2ea02c2a2db4499e86404cc6e3654774] into tmpdir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp, totalSize=111.2 K 2024-11-16T19:30:20,294 DEBUG [RS:0;d11ab77873cb:41539-longCompactions-0 {}] compactions.Compactor(225): Compacting 1c83772e17e2450abee9792923aa63f6.44d75487f1e80d2d12c5e413fac668aa, keycount=39, bloomtype=ROW, size=88.4 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1731785414857 2024-11-16T19:30:20,295 DEBUG [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83. 2024-11-16T19:30:20,295 INFO [RS_OPEN_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83. 
2024-11-16T19:30:20,295 DEBUG [RS:0;d11ab77873cb:41539-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=44d75487f1e80d2d12c5e413fac668aa-2ea02c2a2db4499e86404cc6e3654774, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1731785419102 2024-11-16T19:30:20,296 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=efca89295498edf3132003eebd0ddf83, regionState=OPEN, openSeqNum=128, regionLocation=d11ab77873cb,41539,1731785403984 2024-11-16T19:30:20,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure efca89295498edf3132003eebd0ddf83, server=d11ab77873cb,41539,1731785403984 because future has completed 2024-11-16T19:30:20,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741853_1029 (size=70862) 2024-11-16T19:30:20,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741853_1029 (size=70862) 2024-11-16T19:30:20,307 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-16T19:30:20,307 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure efca89295498edf3132003eebd0ddf83, server=d11ab77873cb,41539,1731785403984 in 238 msec 2024-11-16T19:30:20,309 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-16T19:30:20,309 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=efca89295498edf3132003eebd0ddf83, ASSIGN in 397 msec 2024-11-16T19:30:20,312 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=44d75487f1e80d2d12c5e413fac668aa, daughterA=f88eb02d4f0e204056c921e0c9eab377, daughterB=efca89295498edf3132003eebd0ddf83 in 730 msec 2024-11-16T19:30:20,315 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/f88eb02d4f0e204056c921e0c9eab377/.tmp/info/943e66b48f0544dea8fc8c77d54feef0 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/f88eb02d4f0e204056c921e0c9eab377/info/943e66b48f0544dea8fc8c77d54feef0 2024-11-16T19:30:20,320 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740/.tmp/ns/6fc0769027974579bf63523e59758801 is 43, key is default/ns:d/1731785404756/Put/seqid=0 2024-11-16T19:30:20,320 INFO [RS:0;d11ab77873cb:41539-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): efca89295498edf3132003eebd0ddf83#info#compaction#67 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T19:30:20,321 DEBUG [RS:0;d11ab77873cb:41539-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/f7b7bb877c5b4ee88836f9ecba37681f is 1080, key is row0062/info:/1731785417053/Put/seqid=0 2024-11-16T19:30:20,322 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in f88eb02d4f0e204056c921e0c9eab377/info of f88eb02d4f0e204056c921e0c9eab377 into 943e66b48f0544dea8fc8c77d54feef0(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T19:30:20,322 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f88eb02d4f0e204056c921e0c9eab377: 2024-11-16T19:30:20,322 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731785419579.f88eb02d4f0e204056c921e0c9eab377., storeName=f88eb02d4f0e204056c921e0c9eab377/info, priority=15, startTime=1731785420247; duration=0sec 2024-11-16T19:30:20,323 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:30:20,323 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f88eb02d4f0e204056c921e0c9eab377:info 2024-11-16T19:30:20,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741854_1030 (size=42887) 2024-11-16T19:30:20,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741854_1030 (size=42887) 2024-11-16T19:30:20,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741855_1031 (size=5153) 2024-11-16T19:30:20,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741855_1031 (size=5153) 2024-11-16T19:30:20,338 DEBUG [RS:0;d11ab77873cb:41539-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/f7b7bb877c5b4ee88836f9ecba37681f as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f7b7bb877c5b4ee88836f9ecba37681f 2024-11-16T19:30:20,346 INFO [RS:0;d11ab77873cb:41539-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 2 (all) file(s) in efca89295498edf3132003eebd0ddf83/info of efca89295498edf3132003eebd0ddf83 into f7b7bb877c5b4ee88836f9ecba37681f(size=41.9 K), total size for store is 41.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
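Editor's note: at this point both daughter regions have compacted their halves of the parent's data into real HFiles (69.2 K for f88eb02d4f0e204056c921e0c9eab377, 41.9 K for efca89295498edf3132003eebd0ddf83), so reads around the split key should behave exactly as before the split. The sketch below is a hedged client-side check, not part of the test: the row names near row0062 are taken from the table's row pattern in the log, the range bounds are assumed.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanAcrossSplit {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
      // Rows just below and above the split key row0062 now live in different regions,
      // but a plain scan crosses the boundary transparently.
      Scan scan = new Scan()
          .withStartRow(Bytes.toBytes("row0060"))
          .withStopRow(Bytes.toBytes("row0065"));
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result r : scanner) {
          System.out.println(Bytes.toString(r.getRow()));
        }
      }
    }
  }
}
```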
2024-11-16T19:30:20,346 DEBUG [RS:0;d11ab77873cb:41539-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for efca89295498edf3132003eebd0ddf83: 2024-11-16T19:30:20,346 INFO [RS:0;d11ab77873cb:41539-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83., storeName=efca89295498edf3132003eebd0ddf83/info, priority=14, startTime=1731785420292; duration=0sec 2024-11-16T19:30:20,346 DEBUG [RS:0;d11ab77873cb:41539-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:30:20,346 DEBUG [RS:0;d11ab77873cb:41539-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: efca89295498edf3132003eebd0ddf83:info 2024-11-16T19:30:20,544 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:30:20,544 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:30:20,733 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740/.tmp/ns/6fc0769027974579bf63523e59758801 2024-11-16T19:30:20,760 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740/.tmp/table/b71c802034f84e74ac9d3186de6272aa is 65, key is TestLogRolling-testLogRolling/table:state/1731785405176/Put/seqid=0 2024-11-16T19:30:20,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741856_1032 (size=5340) 2024-11-16T19:30:20,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741856_1032 (size=5340) 2024-11-16T19:30:20,765 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740/.tmp/table/b71c802034f84e74ac9d3186de6272aa 2024-11-16T19:30:20,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740/.tmp/info/49c288fa89084939975942ef09d534ab as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740/info/49c288fa89084939975942ef09d534ab 2024-11-16T19:30:20,775 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740/info/49c288fa89084939975942ef09d534ab, entries=30, sequenceid=17, filesize=9.6 K 2024-11-16T19:30:20,776 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740/.tmp/ns/6fc0769027974579bf63523e59758801 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740/ns/6fc0769027974579bf63523e59758801 2024-11-16T19:30:20,782 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740/ns/6fc0769027974579bf63523e59758801, entries=2, sequenceid=17, filesize=5.0 K 2024-11-16T19:30:20,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740/.tmp/table/b71c802034f84e74ac9d3186de6272aa as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740/table/b71c802034f84e74ac9d3186de6272aa 2024-11-16T19:30:20,789 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740/table/b71c802034f84e74ac9d3186de6272aa, entries=2, sequenceid=17, filesize=5.2 K 2024-11-16T19:30:20,790 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize 
~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 536ms, sequenceid=17, compaction requested=false 2024-11-16T19:30:20,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-16T19:30:21,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41539 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:52850 deadline: 1731785431132, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa. is not online on d11ab77873cb,41539,1731785403984 2024-11-16T19:30:21,158 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa., hostname=d11ab77873cb,41539,1731785403984, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa., hostname=d11ab77873cb,41539,1731785403984, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa. is not online on d11ab77873cb,41539,1731785403984 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T19:30:21,159 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa., hostname=d11ab77873cb,41539,1731785403984, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa. 
is not online on d11ab77873cb,41539,1731785403984 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T19:30:21,159 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731785404813.44d75487f1e80d2d12c5e413fac668aa., hostname=d11ab77873cb,41539,1731785403984, seqNum=2 from cache 2024-11-16T19:30:21,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:30:21,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:22,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:22,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:30:23,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:23,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:24,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:30:24,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:24,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:24,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:24,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:24,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:24,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:24,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:24,804 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:24,804 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:24,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:24,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:24,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:24,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:24,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:24,827 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:24,830 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:24,830 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:24,830 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:24,832 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:25,340 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T19:30:25,342 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:25,343 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:25,344 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:25,344 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:25,345 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:25,346 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:25,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:25,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:25,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:25,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:25,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:25,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:25,368 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:25,368 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:25,371 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:25,371 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:25,371 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:25,373 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T19:30:25,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:25,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:26,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:26,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:27,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:27,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:28,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:28,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:29,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:29,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:30,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:30,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:30:31,276 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83., hostname=d11ab77873cb,41539,1731785403984, seqNum=128] 2024-11-16T19:30:31,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41539 {}] regionserver.HRegion(8855): Flush requested on efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:31,297 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing efca89295498edf3132003eebd0ddf83 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T19:30:31,303 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/6be37ac2869941a9a7d8bd5cff65c908 is 1080, key is row0097/info:/1731785431278/Put/seqid=0 2024-11-16T19:30:31,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741857_1033 (size=12516) 2024-11-16T19:30:31,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741857_1033 (size=12516) 2024-11-16T19:30:31,319 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/6be37ac2869941a9a7d8bd5cff65c908 2024-11-16T19:30:31,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/6be37ac2869941a9a7d8bd5cff65c908 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/6be37ac2869941a9a7d8bd5cff65c908 2024-11-16T19:30:31,336 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/6be37ac2869941a9a7d8bd5cff65c908, entries=7, sequenceid=138, filesize=12.2 K 2024-11-16T19:30:31,338 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for efca89295498edf3132003eebd0ddf83 in 41ms, sequenceid=138, compaction requested=false 2024-11-16T19:30:31,338 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for efca89295498edf3132003eebd0ddf83: 2024-11-16T19:30:31,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41539 {}] regionserver.HRegion(8855): Flush requested on efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:31,339 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing efca89295498edf3132003eebd0ddf83 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-16T19:30:31,358 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/e7002bce356a4ca593f9a99707905142 is 1080, key is row0104/info:/1731785431299/Put/seqid=0 2024-11-16T19:30:31,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741858_1034 (size=20078) 2024-11-16T19:30:31,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741858_1034 (size=20078) 2024-11-16T19:30:31,377 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/e7002bce356a4ca593f9a99707905142 2024-11-16T19:30:31,387 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/e7002bce356a4ca593f9a99707905142 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/e7002bce356a4ca593f9a99707905142 2024-11-16T19:30:31,397 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/e7002bce356a4ca593f9a99707905142, entries=14, sequenceid=155, filesize=19.6 K 2024-11-16T19:30:31,399 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for efca89295498edf3132003eebd0ddf83 in 60ms, sequenceid=155, compaction requested=true 2024-11-16T19:30:31,399 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for efca89295498edf3132003eebd0ddf83: 2024-11-16T19:30:31,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store efca89295498edf3132003eebd0ddf83:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T19:30:31,399 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:30:31,399 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T19:30:31,404 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 75481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T19:30:31,405 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1541): efca89295498edf3132003eebd0ddf83/info is initiating minor compaction (all files) 2024-11-16T19:30:31,405 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of efca89295498edf3132003eebd0ddf83/info in 
TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83. 2024-11-16T19:30:31,405 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f7b7bb877c5b4ee88836f9ecba37681f, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/6be37ac2869941a9a7d8bd5cff65c908, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/e7002bce356a4ca593f9a99707905142] into tmpdir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp, totalSize=73.7 K 2024-11-16T19:30:31,406 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting f7b7bb877c5b4ee88836f9ecba37681f, keycount=35, bloomtype=ROW, size=41.9 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1731785417053 2024-11-16T19:30:31,406 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6be37ac2869941a9a7d8bd5cff65c908, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1731785431278 2024-11-16T19:30:31,407 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting e7002bce356a4ca593f9a99707905142, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1731785431299 2024-11-16T19:30:31,432 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): efca89295498edf3132003eebd0ddf83#info#compaction#72 average throughput is 28.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T19:30:31,433 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/cbfa2dc3cb074ff6921dee72281c8290 is 1080, key is row0062/info:/1731785417053/Put/seqid=0 2024-11-16T19:30:31,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741859_1035 (size=65695) 2024-11-16T19:30:31,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741859_1035 (size=65695) 2024-11-16T19:30:31,456 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/cbfa2dc3cb074ff6921dee72281c8290 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/cbfa2dc3cb074ff6921dee72281c8290 2024-11-16T19:30:31,464 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in efca89295498edf3132003eebd0ddf83/info of efca89295498edf3132003eebd0ddf83 into cbfa2dc3cb074ff6921dee72281c8290(size=64.2 K), total size for store is 64.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T19:30:31,464 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for efca89295498edf3132003eebd0ddf83: 2024-11-16T19:30:31,464 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83., storeName=efca89295498edf3132003eebd0ddf83/info, priority=13, startTime=1731785431399; duration=0sec 2024-11-16T19:30:31,464 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:30:31,464 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: efca89295498edf3132003eebd0ddf83:info 2024-11-16T19:30:31,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:31,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:32,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:32,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:33,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41539 {}] regionserver.HRegion(8855): Flush requested on efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:33,397 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing efca89295498edf3132003eebd0ddf83 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T19:30:33,404 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/0ec557ff8a1946ddac5ecea465b069f8 is 1080, key is row0118/info:/1731785431342/Put/seqid=0 2024-11-16T19:30:33,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741860_1036 (size=17906) 2024-11-16T19:30:33,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741860_1036 (size=17906) 2024-11-16T19:30:33,410 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/0ec557ff8a1946ddac5ecea465b069f8 2024-11-16T19:30:33,416 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/0ec557ff8a1946ddac5ecea465b069f8 as 
hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/0ec557ff8a1946ddac5ecea465b069f8 2024-11-16T19:30:33,422 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/0ec557ff8a1946ddac5ecea465b069f8, entries=12, sequenceid=171, filesize=17.5 K 2024-11-16T19:30:33,423 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for efca89295498edf3132003eebd0ddf83 in 26ms, sequenceid=171, compaction requested=false 2024-11-16T19:30:33,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for efca89295498edf3132003eebd0ddf83: 2024-11-16T19:30:33,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41539 {}] regionserver.HRegion(8855): Flush requested on efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:33,424 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing efca89295498edf3132003eebd0ddf83 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-16T19:30:33,427 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/97864fb8304c4796bab58a5178f60127 is 1080, key is row0130/info:/1731785433399/Put/seqid=0 2024-11-16T19:30:33,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741861_1037 (size=16828) 2024-11-16T19:30:33,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741861_1037 (size=16828) 2024-11-16T19:30:33,432 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=185 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/97864fb8304c4796bab58a5178f60127 2024-11-16T19:30:33,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/97864fb8304c4796bab58a5178f60127 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/97864fb8304c4796bab58a5178f60127 2024-11-16T19:30:33,444 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/97864fb8304c4796bab58a5178f60127, entries=11, sequenceid=185, filesize=16.4 K 2024-11-16T19:30:33,445 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=10.51 KB/10760 for efca89295498edf3132003eebd0ddf83 in 21ms, sequenceid=185, compaction requested=true 2024-11-16T19:30:33,445 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for efca89295498edf3132003eebd0ddf83: 2024-11-16T19:30:33,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store efca89295498edf3132003eebd0ddf83:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T19:30:33,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:30:33,446 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T19:30:33,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41539 {}] regionserver.HRegion(8855): Flush requested on efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:33,446 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing efca89295498edf3132003eebd0ddf83 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-16T19:30:33,447 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 100429 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T19:30:33,447 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1541): efca89295498edf3132003eebd0ddf83/info is initiating minor compaction (all files) 2024-11-16T19:30:33,447 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of efca89295498edf3132003eebd0ddf83/info in TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83. 2024-11-16T19:30:33,447 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/cbfa2dc3cb074ff6921dee72281c8290, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/0ec557ff8a1946ddac5ecea465b069f8, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/97864fb8304c4796bab58a5178f60127] into tmpdir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp, totalSize=98.1 K 2024-11-16T19:30:33,448 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting cbfa2dc3cb074ff6921dee72281c8290, keycount=56, bloomtype=ROW, size=64.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1731785417053 2024-11-16T19:30:33,448 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0ec557ff8a1946ddac5ecea465b069f8, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1731785431342 2024-11-16T19:30:33,449 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting 97864fb8304c4796bab58a5178f60127, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1731785433399 2024-11-16T19:30:33,450 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/e6e7bd448aa14bd69dc3a94c9d87b53f is 1080, key is row0141/info:/1731785433425/Put/seqid=0 2024-11-16T19:30:33,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741862_1038 (size=16828) 2024-11-16T19:30:33,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741862_1038 (size=16828) 2024-11-16T19:30:33,455 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/e6e7bd448aa14bd69dc3a94c9d87b53f 2024-11-16T19:30:33,460 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): efca89295498edf3132003eebd0ddf83#info#compaction#76 average throughput is 27.02 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T19:30:33,461 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/66483f1cb9c3446b9672a1406736c016 is 1080, key is row0062/info:/1731785417053/Put/seqid=0 2024-11-16T19:30:33,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/e6e7bd448aa14bd69dc3a94c9d87b53f as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/e6e7bd448aa14bd69dc3a94c9d87b53f 2024-11-16T19:30:33,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741863_1039 (size=90668) 2024-11-16T19:30:33,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741863_1039 (size=90668) 2024-11-16T19:30:33,469 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/e6e7bd448aa14bd69dc3a94c9d87b53f, entries=11, sequenceid=199, filesize=16.4 K 2024-11-16T19:30:33,471 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for efca89295498edf3132003eebd0ddf83 in 24ms, sequenceid=199, compaction requested=false 2024-11-16T19:30:33,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for efca89295498edf3132003eebd0ddf83: 2024-11-16T19:30:33,473 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/66483f1cb9c3446b9672a1406736c016 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/66483f1cb9c3446b9672a1406736c016 2024-11-16T19:30:33,480 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in efca89295498edf3132003eebd0ddf83/info of efca89295498edf3132003eebd0ddf83 into 66483f1cb9c3446b9672a1406736c016(size=88.5 K), total size for store is 105.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T19:30:33,480 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for efca89295498edf3132003eebd0ddf83: 2024-11-16T19:30:33,480 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83., storeName=efca89295498edf3132003eebd0ddf83/info, priority=13, startTime=1731785433445; duration=0sec 2024-11-16T19:30:33,480 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:30:33,480 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: efca89295498edf3132003eebd0ddf83:info 2024-11-16T19:30:33,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:33,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:33,923 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details. 2024-11-16T19:30:34,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:34,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:35,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41539 {}] regionserver.HRegion(8855): Flush requested on efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:35,472 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing efca89295498edf3132003eebd0ddf83 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-16T19:30:35,480 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/be88051668b14de187bf43322aeca44f is 1080, key is row0152/info:/1731785433447/Put/seqid=0 2024-11-16T19:30:35,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741864_1040 (size=15750) 2024-11-16T19:30:35,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741864_1040 (size=15750) 2024-11-16T19:30:35,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41539 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=efca89295498edf3132003eebd0ddf83, server=d11ab77873cb,41539,1731785403984 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-16T19:30:35,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41539 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:52850 deadline: 1731785445508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=efca89295498edf3132003eebd0ddf83, server=d11ab77873cb,41539,1731785403984 2024-11-16T19:30:35,510 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83., hostname=d11ab77873cb,41539,1731785403984, seqNum=128 , the old value is region=TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83., hostname=d11ab77873cb,41539,1731785403984, seqNum=128, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=efca89295498edf3132003eebd0ddf83, server=d11ab77873cb,41539,1731785403984 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T19:30:35,510 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83., hostname=d11ab77873cb,41539,1731785403984, seqNum=128 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=efca89295498edf3132003eebd0ddf83, server=d11ab77873cb,41539,1731785403984 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T19:30:35,510 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83., hostname=d11ab77873cb,41539,1731785403984, seqNum=128 because the exception is null or not the one we care about 2024-11-16T19:30:35,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:35,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:35,887 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/be88051668b14de187bf43322aeca44f 2024-11-16T19:30:35,896 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/be88051668b14de187bf43322aeca44f as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/be88051668b14de187bf43322aeca44f 2024-11-16T19:30:35,905 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/be88051668b14de187bf43322aeca44f, entries=10, sequenceid=213, filesize=15.4 K 2024-11-16T19:30:35,906 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=19.96 KB/20444 for efca89295498edf3132003eebd0ddf83 in 434ms, sequenceid=213, compaction requested=true 2024-11-16T19:30:35,906 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for efca89295498edf3132003eebd0ddf83: 2024-11-16T19:30:35,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store efca89295498edf3132003eebd0ddf83:info, 
priority=-2147483648, current under compaction store size is 1 2024-11-16T19:30:35,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:30:35,906 DEBUG [RS:0;d11ab77873cb:41539-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T19:30:35,908 DEBUG [RS:0;d11ab77873cb:41539-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 123246 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T19:30:35,908 DEBUG [RS:0;d11ab77873cb:41539-longCompactions-0 {}] regionserver.HStore(1541): efca89295498edf3132003eebd0ddf83/info is initiating minor compaction (all files) 2024-11-16T19:30:35,908 INFO [RS:0;d11ab77873cb:41539-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of efca89295498edf3132003eebd0ddf83/info in TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83. 2024-11-16T19:30:35,908 INFO [RS:0;d11ab77873cb:41539-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/66483f1cb9c3446b9672a1406736c016, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/e6e7bd448aa14bd69dc3a94c9d87b53f, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/be88051668b14de187bf43322aeca44f] into tmpdir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp, totalSize=120.4 K 2024-11-16T19:30:35,908 DEBUG [RS:0;d11ab77873cb:41539-longCompactions-0 {}] compactions.Compactor(225): Compacting 66483f1cb9c3446b9672a1406736c016, keycount=79, bloomtype=ROW, size=88.5 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1731785417053 2024-11-16T19:30:35,909 DEBUG [RS:0;d11ab77873cb:41539-longCompactions-0 {}] compactions.Compactor(225): Compacting e6e7bd448aa14bd69dc3a94c9d87b53f, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1731785433425 2024-11-16T19:30:35,909 DEBUG [RS:0;d11ab77873cb:41539-longCompactions-0 {}] compactions.Compactor(225): Compacting be88051668b14de187bf43322aeca44f, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731785433447 2024-11-16T19:30:35,924 INFO [RS:0;d11ab77873cb:41539-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): efca89295498edf3132003eebd0ddf83#info#compaction#78 average throughput is 25.65 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T19:30:35,924 DEBUG [RS:0;d11ab77873cb:41539-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/213e65aedf0349b3aa00654894c56306 is 1080, key is row0062/info:/1731785417053/Put/seqid=0 2024-11-16T19:30:35,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741865_1041 (size=113412) 2024-11-16T19:30:35,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741865_1041 (size=113412) 2024-11-16T19:30:35,933 DEBUG [RS:0;d11ab77873cb:41539-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/213e65aedf0349b3aa00654894c56306 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/213e65aedf0349b3aa00654894c56306 2024-11-16T19:30:35,939 INFO [RS:0;d11ab77873cb:41539-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in efca89295498edf3132003eebd0ddf83/info of efca89295498edf3132003eebd0ddf83 into 213e65aedf0349b3aa00654894c56306(size=110.8 K), total size for store is 110.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T19:30:35,939 DEBUG [RS:0;d11ab77873cb:41539-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for efca89295498edf3132003eebd0ddf83: 2024-11-16T19:30:35,939 INFO [RS:0;d11ab77873cb:41539-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83., storeName=efca89295498edf3132003eebd0ddf83/info, priority=13, startTime=1731785435906; duration=0sec 2024-11-16T19:30:35,939 DEBUG [RS:0;d11ab77873cb:41539-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:30:35,939 DEBUG [RS:0;d11ab77873cb:41539-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: efca89295498edf3132003eebd0ddf83:info 2024-11-16T19:30:36,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:36,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:37,561 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:37,561 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:38,562 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:38,562 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:39,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:39,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:40,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:40,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:41,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:41,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:42,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:42,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:43,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:43,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:44,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:44,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:44,776 INFO [master/d11ab77873cb:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T19:30:44,776 INFO [master/d11ab77873cb:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
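The RecoverLeaseFSUtils warnings above repeat roughly once per second for the two leftover WAL files because the lease-recovery path probes DistributedFileSystem.isFileClosed through reflection and keeps retrying while the probe fails; once the DFSClient behind that filesystem has been closed, every probe throws IOException("Filesystem closed"), which reflection surfaces as the InvocationTargetException seen in each stack trace. The sketch below only illustrates that probe-and-retry shape, it is not the actual RecoverLeaseFSUtils code: the class and method names (IsFileClosedProbe, findIsFileClosed, waitUntilClosed), the fixed one-second sleep, and the timeout parameter are assumptions made for the example.

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Simplified, hypothetical sketch of the pattern behind the repeated
    // "Failed invocation for <WAL>" warnings in this log.
    public class IsFileClosedProbe {

      // Look up isFileClosed reflectively, since not every FileSystem
      // implementation exposes it; return null when it is absent.
      static Method findIsFileClosed(FileSystem fs) {
        try {
          return fs.getClass().getMethod("isFileClosed", Path.class);
        } catch (NoSuchMethodException e) {
          return null;
        }
      }

      // Poll about once per second until the file is reported closed.
      // When the underlying client is already shut down, the reflective
      // call fails with IOException("Filesystem closed") wrapped in an
      // InvocationTargetException -- the pairing visible in the traces
      // above -- and the loop simply retries until the timeout expires.
      static boolean waitUntilClosed(FileSystem fs, Path wal, long timeoutMs)
          throws InterruptedException {
        Method isFileClosed = findIsFileClosed(fs);
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (isFileClosed != null && System.currentTimeMillis() < deadline) {
          try {
            if ((Boolean) isFileClosed.invoke(fs, wal)) {
              return true;
            }
          } catch (InvocationTargetException | IllegalAccessException e) {
            // The caller would log this as "Failed invocation for ..." and retry.
          }
          Thread.sleep(1000L);
        }
        return false;
      }
    }

Read this way, the once-per-second WARN lines for d11ab77873cb%2C45785%2C1731785273562.1731785273801 and d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta are expected retry noise from a probe loop whose target filesystem has already been closed, not independent failures.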
2024-11-16T19:30:45,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41539 {}] regionserver.HRegion(8855): Flush requested on efca89295498edf3132003eebd0ddf83
2024-11-16T19:30:45,530 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing efca89295498edf3132003eebd0ddf83 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB
2024-11-16T19:30:45,538 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/98391d553c954002b6e293ee2656fddb is 1080, key is row0162/info:/1731785435475/Put/seqid=0
2024-11-16T19:30:45,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741866_1042 (size=26550)
2024-11-16T19:30:45,545 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/98391d553c954002b6e293ee2656fddb
2024-11-16T19:30:45,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741866_1042 (size=26550)
2024-11-16T19:30:45,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/98391d553c954002b6e293ee2656fddb as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/98391d553c954002b6e293ee2656fddb
2024-11-16T19:30:45,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41539 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=efca89295498edf3132003eebd0ddf83, server=d11ab77873cb,41539,1731785403984
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-16T19:30:45,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41539 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:52850 deadline: 1731785455555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=efca89295498edf3132003eebd0ddf83, server=d11ab77873cb,41539,1731785403984
2024-11-16T19:30:45,557 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/98391d553c954002b6e293ee2656fddb, entries=20, sequenceid=237, filesize=25.9 K
2024-11-16T19:30:45,557 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83., hostname=d11ab77873cb,41539,1731785403984, seqNum=128 , the old value is region=TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83., hostname=d11ab77873cb,41539,1731785403984, seqNum=128, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=efca89295498edf3132003eebd0ddf83, server=d11ab77873cb,41539,1731785403984
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-16T19:30:45,557 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83., hostname=d11ab77873cb,41539,1731785403984, seqNum=128 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=efca89295498edf3132003eebd0ddf83, server=d11ab77873cb,41539,1731785403984
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-16T19:30:45,557 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83., hostname=d11ab77873cb,41539,1731785403984, seqNum=128 because the exception is null or not the one we care about
2024-11-16T19:30:45,558 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=9.46 KB/9684 for efca89295498edf3132003eebd0ddf83 in 27ms, sequenceid=237, compaction requested=false
2024-11-16T19:30:45,558 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for efca89295498edf3132003eebd0ddf83:
2024-11-16T19:30:45,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:45,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:46,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:46,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:47,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:47,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:48,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:48,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:49,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:49,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:49,725 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-16T19:30:50,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:50,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:51,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:51,575 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:52,575 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:52,575 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:53,576 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:53,576 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:54,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:54,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-16T19:30:55,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41539 {}] regionserver.HRegion(8855): Flush requested on efca89295498edf3132003eebd0ddf83
2024-11-16T19:30:55,571 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing efca89295498edf3132003eebd0ddf83 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB
2024-11-16T19:30:55,578 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-16T19:30:55,578 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-16T19:30:55,580 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/762a539c835d4997825dfa55860f53eb is 1080, key is row0182/info:/1731785445532/Put/seqid=0
2024-11-16T19:30:55,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741867_1043 (size=15750)
2024-11-16T19:30:55,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741867_1043 (size=15750)
2024-11-16T19:30:55,587 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/762a539c835d4997825dfa55860f53eb
2024-11-16T19:30:55,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/762a539c835d4997825dfa55860f53eb as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/762a539c835d4997825dfa55860f53eb
2024-11-16T19:30:55,597 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/762a539c835d4997825dfa55860f53eb, entries=10, sequenceid=250, filesize=15.4 K
2024-11-16T19:30:55,598 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=1.05 KB/1076 for efca89295498edf3132003eebd0ddf83 in 27ms, sequenceid=250, compaction requested=true
2024-11-16T19:30:55,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for efca89295498edf3132003eebd0ddf83:
2024-11-16T19:30:55,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store efca89295498edf3132003eebd0ddf83:info, priority=-2147483648, current under compaction store size is 1
2024-11-16T19:30:55,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-16T19:30:55,598 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-16T19:30:55,599 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 155712 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-16T19:30:55,599 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1541): efca89295498edf3132003eebd0ddf83/info is initiating minor compaction (all files)
2024-11-16T19:30:55,599 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of efca89295498edf3132003eebd0ddf83/info in TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.
2024-11-16T19:30:55,599 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/213e65aedf0349b3aa00654894c56306, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/98391d553c954002b6e293ee2656fddb, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/762a539c835d4997825dfa55860f53eb] into tmpdir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp, totalSize=152.1 K
2024-11-16T19:30:55,600 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting 213e65aedf0349b3aa00654894c56306, keycount=100, bloomtype=ROW, size=110.8 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731785417053
2024-11-16T19:30:55,600 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting 98391d553c954002b6e293ee2656fddb, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1731785435475
2024-11-16T19:30:55,600 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting 762a539c835d4997825dfa55860f53eb, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1731785445532
2024-11-16T19:30:55,610 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): efca89295498edf3132003eebd0ddf83#info#compaction#81 average throughput is 133.40 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-16T19:30:55,610 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/e33ef24a63f1464196ed53859f13285d is 1080, key is row0062/info:/1731785417053/Put/seqid=0
2024-11-16T19:30:55,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741868_1044 (size=146059)
2024-11-16T19:30:55,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741868_1044 (size=146059)
2024-11-16T19:30:55,620 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/e33ef24a63f1464196ed53859f13285d as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/e33ef24a63f1464196ed53859f13285d
2024-11-16T19:30:55,627 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in efca89295498edf3132003eebd0ddf83/info of efca89295498edf3132003eebd0ddf83 into e33ef24a63f1464196ed53859f13285d(size=142.6 K), total size for store is 142.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-16T19:30:55,627 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for efca89295498edf3132003eebd0ddf83:
2024-11-16T19:30:55,627 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83., storeName=efca89295498edf3132003eebd0ddf83/info, priority=13, startTime=1731785455598; duration=0sec
2024-11-16T19:30:55,627 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-16T19:30:55,627 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: efca89295498edf3132003eebd0ddf83:info
2024-11-16T19:30:56,578 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:56,578 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:57,579 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:57,579 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:57,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41539 {}] regionserver.HRegion(8855): Flush requested on efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:57,589 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing efca89295498edf3132003eebd0ddf83 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T19:30:57,593 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/f53a4d07abd244ebab50fa85a2a2ce37 is 1080, key is row0192/info:/1731785455574/Put/seqid=0 2024-11-16T19:30:57,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741869_1045 (size=12521) 2024-11-16T19:30:57,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741869_1045 (size=12521) 2024-11-16T19:30:57,599 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/f53a4d07abd244ebab50fa85a2a2ce37 2024-11-16T19:30:57,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/f53a4d07abd244ebab50fa85a2a2ce37 as 
hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f53a4d07abd244ebab50fa85a2a2ce37 2024-11-16T19:30:57,611 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f53a4d07abd244ebab50fa85a2a2ce37, entries=7, sequenceid=261, filesize=12.2 K 2024-11-16T19:30:57,611 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for efca89295498edf3132003eebd0ddf83 in 22ms, sequenceid=261, compaction requested=false 2024-11-16T19:30:57,612 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for efca89295498edf3132003eebd0ddf83: 2024-11-16T19:30:57,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41539 {}] regionserver.HRegion(8855): Flush requested on efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:57,613 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing efca89295498edf3132003eebd0ddf83 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-16T19:30:57,617 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/81e81073b730444eb5a3030988681bb4 is 1080, key is row0199/info:/1731785457590/Put/seqid=0 2024-11-16T19:30:57,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741870_1046 (size=16839) 2024-11-16T19:30:57,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741870_1046 (size=16839) 2024-11-16T19:30:57,622 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/81e81073b730444eb5a3030988681bb4 2024-11-16T19:30:57,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/81e81073b730444eb5a3030988681bb4 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/81e81073b730444eb5a3030988681bb4 2024-11-16T19:30:57,633 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/81e81073b730444eb5a3030988681bb4, entries=11, sequenceid=275, filesize=16.4 K 2024-11-16T19:30:57,634 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=10.51 KB/10760 for efca89295498edf3132003eebd0ddf83 in 21ms, sequenceid=275, compaction requested=true 2024-11-16T19:30:57,635 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for efca89295498edf3132003eebd0ddf83: 2024-11-16T19:30:57,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store efca89295498edf3132003eebd0ddf83:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T19:30:57,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:30:57,635 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T19:30:57,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41539 {}] regionserver.HRegion(8855): Flush requested on efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:57,635 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing efca89295498edf3132003eebd0ddf83 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-16T19:30:57,636 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 175419 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T19:30:57,636 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1541): efca89295498edf3132003eebd0ddf83/info is initiating minor compaction (all files) 2024-11-16T19:30:57,636 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of efca89295498edf3132003eebd0ddf83/info in TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83. 2024-11-16T19:30:57,636 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/e33ef24a63f1464196ed53859f13285d, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f53a4d07abd244ebab50fa85a2a2ce37, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/81e81073b730444eb5a3030988681bb4] into tmpdir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp, totalSize=171.3 K 2024-11-16T19:30:57,637 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting e33ef24a63f1464196ed53859f13285d, keycount=130, bloomtype=ROW, size=142.6 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1731785417053 2024-11-16T19:30:57,637 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting f53a4d07abd244ebab50fa85a2a2ce37, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1731785455574 2024-11-16T19:30:57,638 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting 81e81073b730444eb5a3030988681bb4, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1731785457590 2024-11-16T19:30:57,640 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/f13df36b407d4110a8e339d5d226f97b is 1080, key is row0210/info:/1731785457614/Put/seqid=0 2024-11-16T19:30:57,662 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): efca89295498edf3132003eebd0ddf83#info#compaction#85 average throughput is 37.97 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T19:30:57,663 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/0cba7a44b8b74a0f96e4cdbc77b64c97 is 1080, key is row0062/info:/1731785417053/Put/seqid=0 2024-11-16T19:30:57,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741871_1047 (size=16839) 2024-11-16T19:30:57,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741871_1047 (size=16839) 2024-11-16T19:30:57,668 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/f13df36b407d4110a8e339d5d226f97b 2024-11-16T19:30:57,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741872_1048 (size=165585) 2024-11-16T19:30:57,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741872_1048 (size=165585) 2024-11-16T19:30:57,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/f13df36b407d4110a8e339d5d226f97b as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f13df36b407d4110a8e339d5d226f97b 2024-11-16T19:30:57,679 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/0cba7a44b8b74a0f96e4cdbc77b64c97 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/0cba7a44b8b74a0f96e4cdbc77b64c97 2024-11-16T19:30:57,687 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f13df36b407d4110a8e339d5d226f97b, entries=11, sequenceid=289, filesize=16.4 K 2024-11-16T19:30:57,688 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=4.20 KB/4304 for efca89295498edf3132003eebd0ddf83 in 53ms, sequenceid=289, compaction requested=false 2024-11-16T19:30:57,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for efca89295498edf3132003eebd0ddf83: 2024-11-16T19:30:57,689 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in efca89295498edf3132003eebd0ddf83/info of efca89295498edf3132003eebd0ddf83 into 0cba7a44b8b74a0f96e4cdbc77b64c97(size=161.7 K), total size for store is 178.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T19:30:57,689 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for efca89295498edf3132003eebd0ddf83: 2024-11-16T19:30:57,689 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83., storeName=efca89295498edf3132003eebd0ddf83/info, priority=13, startTime=1731785457635; duration=0sec 2024-11-16T19:30:57,689 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:30:57,689 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: efca89295498edf3132003eebd0ddf83:info 2024-11-16T19:30:58,580 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:58,580 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:59,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:59,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:30:59,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41539 {}] regionserver.HRegion(8855): Flush requested on efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:59,656 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing efca89295498edf3132003eebd0ddf83 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T19:30:59,661 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/f979e92c38d545d0a259f5bcb69a94e5 is 1080, key is row0221/info:/1731785457637/Put/seqid=0 2024-11-16T19:30:59,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741873_1049 (size=12523) 2024-11-16T19:30:59,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741873_1049 (size=12523) 2024-11-16T19:30:59,675 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/f979e92c38d545d0a259f5bcb69a94e5 2024-11-16T19:30:59,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/f979e92c38d545d0a259f5bcb69a94e5 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f979e92c38d545d0a259f5bcb69a94e5 2024-11-16T19:30:59,687 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f979e92c38d545d0a259f5bcb69a94e5, entries=7, sequenceid=300, filesize=12.2 K 2024-11-16T19:30:59,688 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9684 for efca89295498edf3132003eebd0ddf83 in 32ms, sequenceid=300, compaction requested=true 2024-11-16T19:30:59,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for efca89295498edf3132003eebd0ddf83: 2024-11-16T19:30:59,688 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T19:30:59,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store efca89295498edf3132003eebd0ddf83:info, 
priority=-2147483648, current under compaction store size is 1 2024-11-16T19:30:59,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:30:59,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41539 {}] regionserver.HRegion(8855): Flush requested on efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:59,689 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing efca89295498edf3132003eebd0ddf83 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-16T19:30:59,690 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 194947 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T19:30:59,690 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1541): efca89295498edf3132003eebd0ddf83/info is initiating minor compaction (all files) 2024-11-16T19:30:59,690 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of efca89295498edf3132003eebd0ddf83/info in TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83. 2024-11-16T19:30:59,690 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/0cba7a44b8b74a0f96e4cdbc77b64c97, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f13df36b407d4110a8e339d5d226f97b, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f979e92c38d545d0a259f5bcb69a94e5] into tmpdir=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp, totalSize=190.4 K 2024-11-16T19:30:59,690 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0cba7a44b8b74a0f96e4cdbc77b64c97, keycount=148, bloomtype=ROW, size=161.7 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1731785417053 2024-11-16T19:30:59,691 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting f13df36b407d4110a8e339d5d226f97b, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1731785457614 2024-11-16T19:30:59,691 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] compactions.Compactor(225): Compacting f979e92c38d545d0a259f5bcb69a94e5, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1731785457637 2024-11-16T19:30:59,694 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/eb16903933c64541ac9e92bcf14aaae9 is 1080, key is row0228/info:/1731785459658/Put/seqid=0 2024-11-16T19:30:59,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is 
added to blk_1073741874_1050 (size=15760) 2024-11-16T19:30:59,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741874_1050 (size=15760) 2024-11-16T19:30:59,708 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/eb16903933c64541ac9e92bcf14aaae9 2024-11-16T19:30:59,709 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): efca89295498edf3132003eebd0ddf83#info#compaction#88 average throughput is 42.59 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T19:30:59,710 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/5a5745fadbb74a98aeed53bdf699b276 is 1080, key is row0062/info:/1731785417053/Put/seqid=0 2024-11-16T19:30:59,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/eb16903933c64541ac9e92bcf14aaae9 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/eb16903933c64541ac9e92bcf14aaae9 2024-11-16T19:30:59,725 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/eb16903933c64541ac9e92bcf14aaae9, entries=10, sequenceid=313, filesize=15.4 K 2024-11-16T19:30:59,727 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=14.71 KB/15064 for efca89295498edf3132003eebd0ddf83 in 37ms, sequenceid=313, compaction requested=false 2024-11-16T19:30:59,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for efca89295498edf3132003eebd0ddf83: 2024-11-16T19:30:59,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41539 {}] regionserver.HRegion(8855): Flush requested on efca89295498edf3132003eebd0ddf83 2024-11-16T19:30:59,728 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing efca89295498edf3132003eebd0ddf83 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-16T19:30:59,732 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/abb35861d89641fc808a18071b7cc9d5 is 1080, key is row0238/info:/1731785459690/Put/seqid=0 2024-11-16T19:30:59,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741875_1051 (size=185113) 2024-11-16T19:30:59,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37181 is added to blk_1073741875_1051 (size=185113) 2024-11-16T19:30:59,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741876_1052 (size=21171) 2024-11-16T19:30:59,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741876_1052 (size=21171) 2024-11-16T19:30:59,741 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/abb35861d89641fc808a18071b7cc9d5 2024-11-16T19:30:59,744 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/5a5745fadbb74a98aeed53bdf699b276 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/5a5745fadbb74a98aeed53bdf699b276 2024-11-16T19:30:59,744 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/abb35861d89641fc808a18071b7cc9d5 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/abb35861d89641fc808a18071b7cc9d5 2024-11-16T19:30:59,749 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/abb35861d89641fc808a18071b7cc9d5, entries=15, sequenceid=331, filesize=20.7 K 2024-11-16T19:30:59,750 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=4.20 KB/4304 for efca89295498edf3132003eebd0ddf83 in 21ms, sequenceid=331, compaction requested=false 2024-11-16T19:30:59,750 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in efca89295498edf3132003eebd0ddf83/info of efca89295498edf3132003eebd0ddf83 into 5a5745fadbb74a98aeed53bdf699b276(size=180.8 K), total size for store is 216.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-16T19:30:59,750 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for efca89295498edf3132003eebd0ddf83: 2024-11-16T19:30:59,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for efca89295498edf3132003eebd0ddf83: 2024-11-16T19:30:59,750 INFO [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83., storeName=efca89295498edf3132003eebd0ddf83/info, priority=13, startTime=1731785459688; duration=0sec 2024-11-16T19:30:59,750 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T19:30:59,750 DEBUG [RS:0;d11ab77873cb:41539-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: efca89295498edf3132003eebd0ddf83:info 2024-11-16T19:31:00,582 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:31:00,582 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:31:01,583 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:31:01,583 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T19:31:01,738 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-16T19:31:01,739 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C41539%2C1731785403984.1731785461739 2024-11-16T19:31:01,758 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:01,758 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:01,759 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:01,759 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:01,759 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:01,759 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/WALs/d11ab77873cb,41539,1731785403984/d11ab77873cb%2C41539%2C1731785403984.1731785404357 with entries=315, filesize=309.17 KB; new WAL /user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/WALs/d11ab77873cb,41539,1731785403984/d11ab77873cb%2C41539%2C1731785403984.1731785461739 2024-11-16T19:31:01,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741833_1009 (size=316595) 2024-11-16T19:31:01,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741833_1009 (size=316595) 2024-11-16T19:31:01,775 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33277:33277),(127.0.0.1/127.0.0.1:38963:38963)] 2024-11-16T19:31:01,780 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing efca89295498edf3132003eebd0ddf83 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-16T19:31:01,786 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/7de9288b489f4fe09a772e0fdc242ca7 is 1080, key is row0253/info:/1731785459730/Put/seqid=0 2024-11-16T19:31:01,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741878_1054 (size=9278) 2024-11-16T19:31:01,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741878_1054 (size=9278) 2024-11-16T19:31:01,797 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=339 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/7de9288b489f4fe09a772e0fdc242ca7 2024-11-16T19:31:01,805 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/.tmp/info/7de9288b489f4fe09a772e0fdc242ca7 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/7de9288b489f4fe09a772e0fdc242ca7 2024-11-16T19:31:01,813 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/7de9288b489f4fe09a772e0fdc242ca7, entries=4, sequenceid=339, filesize=9.1 K 2024-11-16T19:31:01,814 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for efca89295498edf3132003eebd0ddf83 in 34ms, sequenceid=339, compaction requested=true 2024-11-16T19:31:01,815 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for efca89295498edf3132003eebd0ddf83: 2024-11-16T19:31:01,815 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-16T19:31:01,826 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740/.tmp/info/29681781aca14ad380806b10a652452b is 193, key is TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83./info:regioninfo/1731785420296/Put/seqid=0 2024-11-16T19:31:01,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741879_1055 (size=6223) 2024-11-16T19:31:01,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741879_1055 (size=6223) 2024-11-16T19:31:01,838 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740/.tmp/info/29681781aca14ad380806b10a652452b 2024-11-16T19:31:01,847 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740/.tmp/info/29681781aca14ad380806b10a652452b as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740/info/29681781aca14ad380806b10a652452b 2024-11-16T19:31:01,853 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740/info/29681781aca14ad380806b10a652452b, entries=5, sequenceid=21, filesize=6.1 K 2024-11-16T19:31:01,854 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 39ms, sequenceid=21, compaction requested=false 2024-11-16T19:31:01,854 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-16T19:31:01,854 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for f88eb02d4f0e204056c921e0c9eab377: 2024-11-16T19:31:01,855 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C41539%2C1731785403984.1731785461855 2024-11-16T19:31:01,866 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:01,866 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:01,867 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:01,867 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:01,867 INFO 
[sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:01,867 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/WALs/d11ab77873cb,41539,1731785403984/d11ab77873cb%2C41539%2C1731785403984.1731785461739 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/WALs/d11ab77873cb,41539,1731785403984/d11ab77873cb%2C41539%2C1731785403984.1731785461855 2024-11-16T19:31:01,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741877_1053 (size=731) 2024-11-16T19:31:01,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741877_1053 (size=731) 2024-11-16T19:31:01,875 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38963:38963),(127.0.0.1/127.0.0.1:33277:33277)] 2024-11-16T19:31:01,876 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/WALs/d11ab77873cb,41539,1731785403984/d11ab77873cb%2C41539%2C1731785403984.1731785404357 to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/oldWALs/d11ab77873cb%2C41539%2C1731785403984.1731785404357 2024-11-16T19:31:01,876 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T19:31:01,876 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T19:31:01,876 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T19:31:01,877 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T19:31:01,877 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:31:01,877 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/WALs/d11ab77873cb,41539,1731785403984/d11ab77873cb%2C41539%2C1731785403984.1731785461739 to 
hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/oldWALs/d11ab77873cb%2C41539%2C1731785403984.1731785461739 2024-11-16T19:31:01,877 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:31:01,877 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T19:31:01,877 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T19:31:01,877 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1193938377, stopped=false 2024-11-16T19:31:01,877 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d11ab77873cb,43337,1731785403938 2024-11-16T19:31:01,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41539-0x1004a038a830001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T19:31:01,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T19:31:01,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41539-0x1004a038a830001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:31:01,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:31:01,879 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T19:31:01,879 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T19:31:01,879 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T19:31:01,879 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:31:01,879 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:41539-0x1004a038a830001, quorum=127.0.0.1:62282, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:31:01,879 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd11ab77873cb,41539,1731785403984' ***** 2024-11-16T19:31:01,879 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T19:31:01,880 INFO [RS:0;d11ab77873cb:41539 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T19:31:01,880 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:31:01,880 INFO [RS:0;d11ab77873cb:41539 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T19:31:01,880 INFO [RS:0;d11ab77873cb:41539 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T19:31:01,880 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T19:31:01,880 INFO [RS:0;d11ab77873cb:41539 {}] regionserver.HRegionServer(3091): Received CLOSE for efca89295498edf3132003eebd0ddf83 2024-11-16T19:31:01,880 INFO [RS:0;d11ab77873cb:41539 {}] regionserver.HRegionServer(3091): Received CLOSE for f88eb02d4f0e204056c921e0c9eab377 2024-11-16T19:31:01,880 INFO [RS:0;d11ab77873cb:41539 {}] regionserver.HRegionServer(959): stopping server d11ab77873cb,41539,1731785403984 2024-11-16T19:31:01,880 INFO [RS:0;d11ab77873cb:41539 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T19:31:01,880 INFO [RS:0;d11ab77873cb:41539 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d11ab77873cb:41539. 2024-11-16T19:31:01,880 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing efca89295498edf3132003eebd0ddf83, disabling compactions & flushes 2024-11-16T19:31:01,880 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83. 
2024-11-16T19:31:01,880 DEBUG [RS:0;d11ab77873cb:41539 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T19:31:01,880 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83. 2024-11-16T19:31:01,880 DEBUG [RS:0;d11ab77873cb:41539 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:31:01,880 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83. after waiting 0 ms 2024-11-16T19:31:01,880 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83. 2024-11-16T19:31:01,880 INFO [RS:0;d11ab77873cb:41539 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T19:31:01,880 INFO [RS:0;d11ab77873cb:41539 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T19:31:01,880 INFO [RS:0;d11ab77873cb:41539 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-16T19:31:01,880 INFO [RS:0;d11ab77873cb:41539 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T19:31:01,885 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/1c83772e17e2450abee9792923aa63f6.44d75487f1e80d2d12c5e413fac668aa->hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/1c83772e17e2450abee9792923aa63f6-top, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f7b7bb877c5b4ee88836f9ecba37681f, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/TestLogRolling-testLogRolling=44d75487f1e80d2d12c5e413fac668aa-2ea02c2a2db4499e86404cc6e3654774, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/6be37ac2869941a9a7d8bd5cff65c908, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/cbfa2dc3cb074ff6921dee72281c8290, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/e7002bce356a4ca593f9a99707905142, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/0ec557ff8a1946ddac5ecea465b069f8, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/66483f1cb9c3446b9672a1406736c016, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/97864fb8304c4796bab58a5178f60127, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/e6e7bd448aa14bd69dc3a94c9d87b53f, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/213e65aedf0349b3aa00654894c56306, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/be88051668b14de187bf43322aeca44f, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/98391d553c954002b6e293ee2656fddb, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/e33ef24a63f1464196ed53859f13285d, 
hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/762a539c835d4997825dfa55860f53eb, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f53a4d07abd244ebab50fa85a2a2ce37, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/0cba7a44b8b74a0f96e4cdbc77b64c97, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/81e81073b730444eb5a3030988681bb4, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f13df36b407d4110a8e339d5d226f97b, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f979e92c38d545d0a259f5bcb69a94e5] to archive 2024-11-16T19:31:01,887 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-16T19:31:01,888 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/1c83772e17e2450abee9792923aa63f6.44d75487f1e80d2d12c5e413fac668aa to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/1c83772e17e2450abee9792923aa63f6.44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:31:01,889 INFO [RS:0;d11ab77873cb:41539 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-16T19:31:01,889 DEBUG [RS:0;d11ab77873cb:41539 {}] regionserver.HRegionServer(1325): Online Regions={efca89295498edf3132003eebd0ddf83=TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83., 1588230740=hbase:meta,,1.1588230740, f88eb02d4f0e204056c921e0c9eab377=TestLogRolling-testLogRolling,,1731785419579.f88eb02d4f0e204056c921e0c9eab377.} 2024-11-16T19:31:01,889 DEBUG [RS:0;d11ab77873cb:41539 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, efca89295498edf3132003eebd0ddf83, f88eb02d4f0e204056c921e0c9eab377 2024-11-16T19:31:01,889 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T19:31:01,890 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T19:31:01,890 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T19:31:01,890 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T19:31:01,890 DEBUG 
[RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T19:31:01,891 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f7b7bb877c5b4ee88836f9ecba37681f to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f7b7bb877c5b4ee88836f9ecba37681f 2024-11-16T19:31:01,892 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/TestLogRolling-testLogRolling=44d75487f1e80d2d12c5e413fac668aa-2ea02c2a2db4499e86404cc6e3654774 to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/TestLogRolling-testLogRolling=44d75487f1e80d2d12c5e413fac668aa-2ea02c2a2db4499e86404cc6e3654774 2024-11-16T19:31:01,894 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/6be37ac2869941a9a7d8bd5cff65c908 to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/6be37ac2869941a9a7d8bd5cff65c908 2024-11-16T19:31:01,896 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/cbfa2dc3cb074ff6921dee72281c8290 to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/cbfa2dc3cb074ff6921dee72281c8290 2024-11-16T19:31:01,898 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/e7002bce356a4ca593f9a99707905142 to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/e7002bce356a4ca593f9a99707905142 2024-11-16T19:31:01,899 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/0ec557ff8a1946ddac5ecea465b069f8 to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/0ec557ff8a1946ddac5ecea465b069f8 2024-11-16T19:31:01,901 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/66483f1cb9c3446b9672a1406736c016 to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/66483f1cb9c3446b9672a1406736c016 2024-11-16T19:31:01,903 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/97864fb8304c4796bab58a5178f60127 to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/97864fb8304c4796bab58a5178f60127 2024-11-16T19:31:01,906 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/e6e7bd448aa14bd69dc3a94c9d87b53f to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/e6e7bd448aa14bd69dc3a94c9d87b53f 2024-11-16T19:31:01,909 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/213e65aedf0349b3aa00654894c56306 to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/213e65aedf0349b3aa00654894c56306 2024-11-16T19:31:01,911 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/be88051668b14de187bf43322aeca44f to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/be88051668b14de187bf43322aeca44f 2024-11-16T19:31:01,913 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/98391d553c954002b6e293ee2656fddb to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/98391d553c954002b6e293ee2656fddb 2024-11-16T19:31:01,914 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-16T19:31:01,915 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T19:31:01,915 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T19:31:01,915 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731785461889Running coprocessor pre-close hooks at 1731785461889Disabling compacts and flushes for region at 1731785461889Disabling writes for close at 1731785461890 (+1 ms)Writing region close event to WAL at 1731785461906 (+16 ms)Running coprocessor post-close hooks at 1731785461914 (+8 ms)Closed at 1731785461915 (+1 ms) 2024-11-16T19:31:01,915 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T19:31:01,916 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/e33ef24a63f1464196ed53859f13285d to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/e33ef24a63f1464196ed53859f13285d 2024-11-16T19:31:01,917 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/762a539c835d4997825dfa55860f53eb to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/762a539c835d4997825dfa55860f53eb 2024-11-16T19:31:01,919 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f53a4d07abd244ebab50fa85a2a2ce37 to 
hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f53a4d07abd244ebab50fa85a2a2ce37 2024-11-16T19:31:01,921 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/0cba7a44b8b74a0f96e4cdbc77b64c97 to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/0cba7a44b8b74a0f96e4cdbc77b64c97 2024-11-16T19:31:01,923 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/81e81073b730444eb5a3030988681bb4 to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/81e81073b730444eb5a3030988681bb4 2024-11-16T19:31:01,924 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f13df36b407d4110a8e339d5d226f97b to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f13df36b407d4110a8e339d5d226f97b 2024-11-16T19:31:01,926 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f979e92c38d545d0a259f5bcb69a94e5 to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/info/f979e92c38d545d0a259f5bcb69a94e5 2024-11-16T19:31:01,926 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=d11ab77873cb:43337 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-16T19:31:01,927 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [f7b7bb877c5b4ee88836f9ecba37681f=42887, 6be37ac2869941a9a7d8bd5cff65c908=12516, cbfa2dc3cb074ff6921dee72281c8290=65695, e7002bce356a4ca593f9a99707905142=20078, 0ec557ff8a1946ddac5ecea465b069f8=17906, 66483f1cb9c3446b9672a1406736c016=90668, 97864fb8304c4796bab58a5178f60127=16828, e6e7bd448aa14bd69dc3a94c9d87b53f=16828, 213e65aedf0349b3aa00654894c56306=113412, be88051668b14de187bf43322aeca44f=15750, 98391d553c954002b6e293ee2656fddb=26550, e33ef24a63f1464196ed53859f13285d=146059, 762a539c835d4997825dfa55860f53eb=15750, f53a4d07abd244ebab50fa85a2a2ce37=12521, 0cba7a44b8b74a0f96e4cdbc77b64c97=165585, 81e81073b730444eb5a3030988681bb4=16839, f13df36b407d4110a8e339d5d226f97b=16839, f979e92c38d545d0a259f5bcb69a94e5=12523] 2024-11-16T19:31:01,933 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/efca89295498edf3132003eebd0ddf83/recovered.edits/342.seqid, newMaxSeqId=342, maxSeqId=127 2024-11-16T19:31:01,934 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83. 2024-11-16T19:31:01,934 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for efca89295498edf3132003eebd0ddf83: Waiting for close lock at 1731785461880Running coprocessor pre-close hooks at 1731785461880Disabling compacts and flushes for region at 1731785461880Disabling writes for close at 1731785461880Writing region close event to WAL at 1731785461929 (+49 ms)Running coprocessor post-close hooks at 1731785461934 (+5 ms)Closed at 1731785461934 2024-11-16T19:31:01,934 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731785419579.efca89295498edf3132003eebd0ddf83. 2024-11-16T19:31:01,934 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f88eb02d4f0e204056c921e0c9eab377, disabling compactions & flushes 2024-11-16T19:31:01,934 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731785419579.f88eb02d4f0e204056c921e0c9eab377. 2024-11-16T19:31:01,934 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731785419579.f88eb02d4f0e204056c921e0c9eab377. 2024-11-16T19:31:01,934 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731785419579.f88eb02d4f0e204056c921e0c9eab377. after waiting 0 ms 2024-11-16T19:31:01,934 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731785419579.f88eb02d4f0e204056c921e0c9eab377. 
2024-11-16T19:31:01,937 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731785419579.f88eb02d4f0e204056c921e0c9eab377.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/f88eb02d4f0e204056c921e0c9eab377/info/1c83772e17e2450abee9792923aa63f6.44d75487f1e80d2d12c5e413fac668aa->hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/44d75487f1e80d2d12c5e413fac668aa/info/1c83772e17e2450abee9792923aa63f6-bottom] to archive 2024-11-16T19:31:01,939 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731785419579.f88eb02d4f0e204056c921e0c9eab377.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-16T19:31:01,940 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731785419579.f88eb02d4f0e204056c921e0c9eab377.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/f88eb02d4f0e204056c921e0c9eab377/info/1c83772e17e2450abee9792923aa63f6.44d75487f1e80d2d12c5e413fac668aa to hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/archive/data/default/TestLogRolling-testLogRolling/f88eb02d4f0e204056c921e0c9eab377/info/1c83772e17e2450abee9792923aa63f6.44d75487f1e80d2d12c5e413fac668aa 2024-11-16T19:31:01,941 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731785419579.f88eb02d4f0e204056c921e0c9eab377.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-16T19:31:01,946 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/data/default/TestLogRolling-testLogRolling/f88eb02d4f0e204056c921e0c9eab377/recovered.edits/132.seqid, newMaxSeqId=132, maxSeqId=127 2024-11-16T19:31:01,947 INFO [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731785419579.f88eb02d4f0e204056c921e0c9eab377. 2024-11-16T19:31:01,947 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f88eb02d4f0e204056c921e0c9eab377: Waiting for close lock at 1731785461934Running coprocessor pre-close hooks at 1731785461934Disabling compacts and flushes for region at 1731785461934Disabling writes for close at 1731785461934Writing region close event to WAL at 1731785461942 (+8 ms)Running coprocessor post-close hooks at 1731785461947 (+5 ms)Closed at 1731785461947 2024-11-16T19:31:01,947 DEBUG [RS_CLOSE_REGION-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731785419579.f88eb02d4f0e204056c921e0c9eab377. 2024-11-16T19:31:02,089 INFO [RS:0;d11ab77873cb:41539 {}] regionserver.HRegionServer(976): stopping server d11ab77873cb,41539,1731785403984; all regions closed. 
2024-11-16T19:31:02,090 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:02,090 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:02,090 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:02,090 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:02,090 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:02,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741834_1010 (size=8107) 2024-11-16T19:31:02,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741834_1010 (size=8107) 2024-11-16T19:31:02,094 DEBUG [RS:0;d11ab77873cb:41539 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/oldWALs 2024-11-16T19:31:02,094 INFO [RS:0;d11ab77873cb:41539 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d11ab77873cb%2C41539%2C1731785403984.meta:.meta(num 1731785404710) 2024-11-16T19:31:02,095 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:02,095 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:02,095 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:02,095 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:02,095 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:02,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741880_1056 (size=780) 2024-11-16T19:31:02,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741880_1056 (size=780) 2024-11-16T19:31:02,100 DEBUG [RS:0;d11ab77873cb:41539 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/oldWALs 2024-11-16T19:31:02,100 INFO [RS:0;d11ab77873cb:41539 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d11ab77873cb%2C41539%2C1731785403984:(num 1731785461855) 2024-11-16T19:31:02,100 DEBUG [RS:0;d11ab77873cb:41539 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:31:02,100 INFO [RS:0;d11ab77873cb:41539 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T19:31:02,100 INFO [RS:0;d11ab77873cb:41539 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T19:31:02,100 INFO [RS:0;d11ab77873cb:41539 {}] hbase.ChoreService(370): Chore service for: regionserver/d11ab77873cb:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T19:31:02,100 INFO [RS:0;d11ab77873cb:41539 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T19:31:02,100 INFO [RS:0;d11ab77873cb:41539 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41539 2024-11-16T19:31:02,101 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T19:31:02,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41539-0x1004a038a830001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d11ab77873cb,41539,1731785403984 2024-11-16T19:31:02,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T19:31:02,103 INFO [RS:0;d11ab77873cb:41539 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T19:31:02,103 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d11ab77873cb,41539,1731785403984] 2024-11-16T19:31:02,104 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d11ab77873cb,41539,1731785403984 already deleted, retry=false 2024-11-16T19:31:02,104 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d11ab77873cb,41539,1731785403984 expired; onlineServers=0 2024-11-16T19:31:02,104 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd11ab77873cb,43337,1731785403938' ***** 2024-11-16T19:31:02,104 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T19:31:02,104 INFO [M:0;d11ab77873cb:43337 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T19:31:02,104 INFO [M:0;d11ab77873cb:43337 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T19:31:02,105 DEBUG [M:0;d11ab77873cb:43337 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T19:31:02,105 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-16T19:31:02,105 DEBUG [M:0;d11ab77873cb:43337 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T19:31:02,105 DEBUG [master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.large.0-1731785404109 {}] cleaner.HFileCleaner(306): Exit Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.large.0-1731785404109,5,FailOnTimeoutGroup] 2024-11-16T19:31:02,105 DEBUG [master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.small.0-1731785404109 {}] cleaner.HFileCleaner(306): Exit Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.small.0-1731785404109,5,FailOnTimeoutGroup] 2024-11-16T19:31:02,105 INFO [M:0;d11ab77873cb:43337 {}] hbase.ChoreService(370): Chore service for: master/d11ab77873cb:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T19:31:02,105 INFO [M:0;d11ab77873cb:43337 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T19:31:02,105 DEBUG [M:0;d11ab77873cb:43337 {}] master.HMaster(1795): Stopping service threads 2024-11-16T19:31:02,105 INFO [M:0;d11ab77873cb:43337 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T19:31:02,105 INFO [M:0;d11ab77873cb:43337 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T19:31:02,106 INFO [M:0;d11ab77873cb:43337 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T19:31:02,106 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-16T19:31:02,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T19:31:02,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:31:02,107 DEBUG [M:0;d11ab77873cb:43337 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-16T19:31:02,107 DEBUG [M:0;d11ab77873cb:43337 {}] master.ActiveMasterManager(353): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-16T19:31:02,108 INFO [M:0;d11ab77873cb:43337 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/.lastflushedseqids 2024-11-16T19:31:02,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741881_1057 (size=228) 2024-11-16T19:31:02,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741881_1057 (size=228) 2024-11-16T19:31:02,118 INFO [M:0;d11ab77873cb:43337 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T19:31:02,118 INFO [M:0;d11ab77873cb:43337 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T19:31:02,118 DEBUG [M:0;d11ab77873cb:43337 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T19:31:02,118 INFO [M:0;d11ab77873cb:43337 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:31:02,118 DEBUG [M:0;d11ab77873cb:43337 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:31:02,118 DEBUG [M:0;d11ab77873cb:43337 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T19:31:02,118 DEBUG [M:0;d11ab77873cb:43337 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T19:31:02,119 INFO [M:0;d11ab77873cb:43337 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.43 KB heapSize=63.38 KB 2024-11-16T19:31:02,138 DEBUG [M:0;d11ab77873cb:43337 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f8efd2f97ffb4a2e858aa7544297a457 is 82, key is hbase:meta,,1/info:regioninfo/1731785404744/Put/seqid=0 2024-11-16T19:31:02,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741882_1058 (size=5672) 2024-11-16T19:31:02,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741882_1058 (size=5672) 2024-11-16T19:31:02,143 INFO [M:0;d11ab77873cb:43337 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f8efd2f97ffb4a2e858aa7544297a457 2024-11-16T19:31:02,174 DEBUG [M:0;d11ab77873cb:43337 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/34e0a14a00104ff0a05381151641c1d2 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731785405181/Put/seqid=0 2024-11-16T19:31:02,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741883_1059 (size=7090) 2024-11-16T19:31:02,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741883_1059 (size=7090) 2024-11-16T19:31:02,181 INFO [M:0;d11ab77873cb:43337 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.83 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/34e0a14a00104ff0a05381151641c1d2 2024-11-16T19:31:02,186 INFO [M:0;d11ab77873cb:43337 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 34e0a14a00104ff0a05381151641c1d2 2024-11-16T19:31:02,201 DEBUG [M:0;d11ab77873cb:43337 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b411a2d1631643088ea0ad55def96ba1 is 69, key is d11ab77873cb,41539,1731785403984/rs:state/1731785404217/Put/seqid=0 2024-11-16T19:31:02,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41539-0x1004a038a830001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:31:02,204 INFO [RS:0;d11ab77873cb:41539 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T19:31:02,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41539-0x1004a038a830001, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:31:02,204 INFO [RS:0;d11ab77873cb:41539 
{}] regionserver.HRegionServer(1031): Exiting; stopping=d11ab77873cb,41539,1731785403984; zookeeper connection closed. 2024-11-16T19:31:02,204 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2c23f9bb {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2c23f9bb 2024-11-16T19:31:02,204 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T19:31:02,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741884_1060 (size=5156) 2024-11-16T19:31:02,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741884_1060 (size=5156) 2024-11-16T19:31:02,206 INFO [M:0;d11ab77873cb:43337 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b411a2d1631643088ea0ad55def96ba1 2024-11-16T19:31:02,228 DEBUG [M:0;d11ab77873cb:43337 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/eeffd19f35684414a98b6029baf99058 is 52, key is load_balancer_on/state:d/1731785404809/Put/seqid=0 2024-11-16T19:31:02,232 INFO [regionserver/d11ab77873cb:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T19:31:02,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741885_1061 (size=5056) 2024-11-16T19:31:02,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741885_1061 (size=5056) 2024-11-16T19:31:02,241 INFO [M:0;d11ab77873cb:43337 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/eeffd19f35684414a98b6029baf99058 2024-11-16T19:31:02,248 DEBUG [M:0;d11ab77873cb:43337 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f8efd2f97ffb4a2e858aa7544297a457 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f8efd2f97ffb4a2e858aa7544297a457 2024-11-16T19:31:02,255 INFO [M:0;d11ab77873cb:43337 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f8efd2f97ffb4a2e858aa7544297a457, entries=8, sequenceid=125, filesize=5.5 K 2024-11-16T19:31:02,257 DEBUG [M:0;d11ab77873cb:43337 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/34e0a14a00104ff0a05381151641c1d2 as 
hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/34e0a14a00104ff0a05381151641c1d2 2024-11-16T19:31:02,263 INFO [M:0;d11ab77873cb:43337 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 34e0a14a00104ff0a05381151641c1d2 2024-11-16T19:31:02,263 INFO [M:0;d11ab77873cb:43337 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/34e0a14a00104ff0a05381151641c1d2, entries=13, sequenceid=125, filesize=6.9 K 2024-11-16T19:31:02,265 DEBUG [M:0;d11ab77873cb:43337 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b411a2d1631643088ea0ad55def96ba1 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b411a2d1631643088ea0ad55def96ba1 2024-11-16T19:31:02,272 INFO [M:0;d11ab77873cb:43337 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b411a2d1631643088ea0ad55def96ba1, entries=1, sequenceid=125, filesize=5.0 K 2024-11-16T19:31:02,273 DEBUG [M:0;d11ab77873cb:43337 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/eeffd19f35684414a98b6029baf99058 as hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/eeffd19f35684414a98b6029baf99058 2024-11-16T19:31:02,279 INFO [M:0;d11ab77873cb:43337 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/3e0053c7-a7ed-a08e-6271-1b94ee6ddf45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/eeffd19f35684414a98b6029baf99058, entries=1, sequenceid=125, filesize=4.9 K 2024-11-16T19:31:02,280 INFO [M:0;d11ab77873cb:43337 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.32 KB/64840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 162ms, sequenceid=125, compaction requested=false 2024-11-16T19:31:02,285 INFO [M:0;d11ab77873cb:43337 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:31:02,285 DEBUG [M:0;d11ab77873cb:43337 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731785462118Disabling compacts and flushes for region at 1731785462118Disabling writes for close at 1731785462118Obtaining lock to block concurrent updates at 1731785462119 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731785462119Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52663, getHeapSize=64840, getOffHeapSize=0, getCellsCount=148 at 1731785462119Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731785462120 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731785462120Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731785462137 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731785462137Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731785462153 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731785462174 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731785462174Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731785462186 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731785462200 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731785462200Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731785462209 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731785462228 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731785462228Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17b7ff3b: reopening flushed file at 1731785462247 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a9600d4: reopening flushed file at 1731785462256 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7700b39e: reopening flushed file at 1731785462263 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@813c732: reopening flushed file at 1731785462272 (+9 ms)Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.32 KB/64840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 162ms, sequenceid=125, compaction requested=false at 1731785462280 (+8 ms)Writing region close event to WAL at 1731785462285 (+5 ms)Closed at 1731785462285 2024-11-16T19:31:02,287 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:02,287 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:02,287 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:02,287 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:02,287 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:02,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41019 is added to blk_1073741830_1006 (size=61332) 2024-11-16T19:31:02,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37181 is added to blk_1073741830_1006 (size=61332) 2024-11-16T19:31:02,291 INFO [M:0;d11ab77873cb:43337 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T19:31:02,291 INFO [M:0;d11ab77873cb:43337 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43337 2024-11-16T19:31:02,291 INFO [M:0;d11ab77873cb:43337 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T19:31:02,291 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
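As a cross-check of the master:store flush recorded just above: the per-family store flushers report 504 B (info), 50.83 KB (proc), 65 B (rs) and 48 B (state), and against the summary total of 52,663 B this works out as 52,663 − 504 − 65 − 48 = 52,046 B ≈ 50.83 KB, so the proc family accounts for essentially all of the ~51.43 KB flushed for region 1595e783b53d99cd5eef43b6debb2682, consistent with the "Finished flush of dataSize ~51.43 KB/52663 ... in 162ms, sequenceid=125" line.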
2024-11-16T19:31:02,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:31:02,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43337-0x1004a038a830000, quorum=127.0.0.1:62282, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:31:02,396 INFO [M:0;d11ab77873cb:43337 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T19:31:02,402 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@9612b29{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:31:02,402 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@314e7370{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:31:02,403 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:31:02,403 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1dab95de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:31:02,403 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7305dd28{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/hadoop.log.dir/,STOPPED} 2024-11-16T19:31:02,405 WARN [BP-1326534687-172.17.0.2-1731785403184 heartbeating to localhost/127.0.0.1:45363 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T19:31:02,405 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T19:31:02,405 WARN [BP-1326534687-172.17.0.2-1731785403184 heartbeating to localhost/127.0.0.1:45363 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1326534687-172.17.0.2-1731785403184 (Datanode Uuid 8ed6f7a9-36bf-46a3-b402-ddf46d21a5b5) service to localhost/127.0.0.1:45363 2024-11-16T19:31:02,405 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T19:31:02,406 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/cluster_aadf2298-c241-b4a0-4c5f-d80730f5b5fd/data/data3/current/BP-1326534687-172.17.0.2-1731785403184 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:31:02,406 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/cluster_aadf2298-c241-b4a0-4c5f-d80730f5b5fd/data/data4/current/BP-1326534687-172.17.0.2-1731785403184 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:31:02,407 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T19:31:02,415 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@45890504{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:31:02,415 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3d639fc0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:31:02,415 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:31:02,415 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@53cff5cb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:31:02,416 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a48d3d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/hadoop.log.dir/,STOPPED} 2024-11-16T19:31:02,418 WARN [BP-1326534687-172.17.0.2-1731785403184 heartbeating to localhost/127.0.0.1:45363 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T19:31:02,418 WARN [BP-1326534687-172.17.0.2-1731785403184 heartbeating to localhost/127.0.0.1:45363 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1326534687-172.17.0.2-1731785403184 (Datanode Uuid 4389e0b7-99f0-46ab-8cb7-1f75eace8c24) service to localhost/127.0.0.1:45363 2024-11-16T19:31:02,419 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/cluster_aadf2298-c241-b4a0-4c5f-d80730f5b5fd/data/data1/current/BP-1326534687-172.17.0.2-1731785403184 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:31:02,419 
WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/cluster_aadf2298-c241-b4a0-4c5f-d80730f5b5fd/data/data2/current/BP-1326534687-172.17.0.2-1731785403184 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T19:31:02,419 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T19:31:02,419 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T19:31:02,420 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T19:31:02,428 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@52d230c9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T19:31:02,428 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@240fc28c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T19:31:02,429 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T19:31:02,429 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56433553{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T19:31:02,429 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10ce7a76{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/hadoop.log.dir/,STOPPED} 2024-11-16T19:31:02,438 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T19:31:02,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T19:31:02,485 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=232 (was 207) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45363 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45363 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:45363 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45363 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45363 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45363 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45363 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45363 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45363 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=517 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=134 (was 83) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3180 (was 1978) - AvailableMemoryMB LEAK? 
- 2024-11-16T19:31:02,496 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=232, OpenFileDescriptor=517, MaxFileDescriptor=1048576, SystemLoadAverage=134, ProcessCount=11, AvailableMemoryMB=3179 2024-11-16T19:31:02,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T19:31:02,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/hadoop.log.dir so I do NOT create it in target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec 2024-11-16T19:31:02,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2bc1c9ee-6e74-ad36-83dc-494f55eae53c/hadoop.tmp.dir so I do NOT create it in target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec 2024-11-16T19:31:02,497 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/cluster_705530ed-101b-4d64-497c-510121ec9330, deleteOnExit=true 2024-11-16T19:31:02,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T19:31:02,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/test.cache.data in system properties and HBase conf 2024-11-16T19:31:02,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T19:31:02,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/hadoop.log.dir in system properties and HBase conf 2024-11-16T19:31:02,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T19:31:02,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T19:31:02,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T19:31:02,497 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T19:31:02,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T19:31:02,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T19:31:02,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T19:31:02,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T19:31:02,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T19:31:02,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T19:31:02,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T19:31:02,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T19:31:02,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T19:31:02,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/nfs.dump.dir in system properties and HBase conf 2024-11-16T19:31:02,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/java.io.tmpdir in system properties and HBase conf 2024-11-16T19:31:02,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T19:31:02,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T19:31:02,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T19:31:02,515 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T19:31:02,577 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:31:02,581 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:31:02,583 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:31:02,584 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:31:02,588 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:31:02,588 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:31:02,588 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T19:31:02,589 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:31:02,592 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@eedc0c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:31:02,592 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@93c51d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:31:02,712 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1e07b1d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/java.io.tmpdir/jetty-localhost-35719-hadoop-hdfs-3_4_1-tests_jar-_-any-8579119651614667718/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T19:31:02,713 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@18bd2805{HTTP/1.1, (http/1.1)}{localhost:35719} 2024-11-16T19:31:02,713 INFO [Time-limited test {}] server.Server(415): Started @289367ms 2024-11-16T19:31:02,725 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T19:31:02,774 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:31:02,777 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:31:02,779 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:31:02,779 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:31:02,779 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T19:31:02,780 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@76ccace4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:31:02,780 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d359c98{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:31:02,899 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@18478920{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/java.io.tmpdir/jetty-localhost-46217-hadoop-hdfs-3_4_1-tests_jar-_-any-7541748347065775677/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:31:02,900 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@72ee40d2{HTTP/1.1, (http/1.1)}{localhost:46217} 2024-11-16T19:31:02,900 INFO [Time-limited test {}] server.Server(415): Started @289554ms 2024-11-16T19:31:02,902 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T19:31:02,947 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T19:31:02,953 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T19:31:02,957 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T19:31:02,957 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T19:31:02,957 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T19:31:02,960 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@637d6eb9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/hadoop.log.dir/,AVAILABLE} 2024-11-16T19:31:02,961 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4edfb46d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T19:31:02,989 WARN [Thread-2478 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/cluster_705530ed-101b-4d64-497c-510121ec9330/data/data2/current/BP-2095152159-172.17.0.2-1731785462519/current, will proceed with Du for space computation calculation, 2024-11-16T19:31:02,989 WARN [Thread-2477 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/cluster_705530ed-101b-4d64-497c-510121ec9330/data/data1/current/BP-2095152159-172.17.0.2-1731785462519/current, will proceed with Du for space computation calculation, 2024-11-16T19:31:03,024 WARN [Thread-2456 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T19:31:03,027 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x669209168cd66290 with lease ID 0x498e2029b2e83b35: Processing first storage report for DS-6dbef69f-d853-4b5f-bb2f-ab3f1b9d3792 from datanode DatanodeRegistration(127.0.0.1:42679, datanodeUuid=1d1779f2-ca86-41a0-b714-01ed0c8077e5, infoPort=40223, infoSecurePort=0, ipcPort=44939, storageInfo=lv=-57;cid=testClusterID;nsid=1821471073;c=1731785462519) 2024-11-16T19:31:03,028 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x669209168cd66290 with lease ID 0x498e2029b2e83b35: from storage DS-6dbef69f-d853-4b5f-bb2f-ab3f1b9d3792 node DatanodeRegistration(127.0.0.1:42679, datanodeUuid=1d1779f2-ca86-41a0-b714-01ed0c8077e5, infoPort=40223, infoSecurePort=0, ipcPort=44939, storageInfo=lv=-57;cid=testClusterID;nsid=1821471073;c=1731785462519), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:31:03,028 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x669209168cd66290 with lease ID 0x498e2029b2e83b35: Processing first storage report for DS-142be494-1f84-4b39-8734-b5d2400d2f70 from datanode DatanodeRegistration(127.0.0.1:42679, datanodeUuid=1d1779f2-ca86-41a0-b714-01ed0c8077e5, infoPort=40223, infoSecurePort=0, ipcPort=44939, storageInfo=lv=-57;cid=testClusterID;nsid=1821471073;c=1731785462519) 2024-11-16T19:31:03,028 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x669209168cd66290 with lease ID 0x498e2029b2e83b35: from storage DS-142be494-1f84-4b39-8734-b5d2400d2f70 node DatanodeRegistration(127.0.0.1:42679, datanodeUuid=1d1779f2-ca86-41a0-b714-01ed0c8077e5, infoPort=40223, infoSecurePort=0, ipcPort=44939, storageInfo=lv=-57;cid=testClusterID;nsid=1821471073;c=1731785462519), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:31:03,088 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2b13a29{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/java.io.tmpdir/jetty-localhost-42993-hadoop-hdfs-3_4_1-tests_jar-_-any-11464228053716179331/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T19:31:03,088 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7b4a20b3{HTTP/1.1, (http/1.1)}{localhost:42993} 2024-11-16T19:31:03,088 INFO [Time-limited test {}] server.Server(415): Started @289743ms 2024-11-16T19:31:03,090 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-16T19:31:03,150 WARN [Thread-2503 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/cluster_705530ed-101b-4d64-497c-510121ec9330/data/data3/current/BP-2095152159-172.17.0.2-1731785462519/current, will proceed with Du for space computation calculation, 2024-11-16T19:31:03,151 WARN [Thread-2504 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/cluster_705530ed-101b-4d64-497c-510121ec9330/data/data4/current/BP-2095152159-172.17.0.2-1731785462519/current, will proceed with Du for space computation calculation, 2024-11-16T19:31:03,172 WARN [Thread-2492 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T19:31:03,175 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x812b73e9a6a1a5c2 with lease ID 0x498e2029b2e83b36: Processing first storage report for DS-0c450d1c-45dc-4194-8417-c5d88a815909 from datanode DatanodeRegistration(127.0.0.1:35753, datanodeUuid=6faf8100-7518-4565-aeef-57015578d340, infoPort=38877, infoSecurePort=0, ipcPort=42387, storageInfo=lv=-57;cid=testClusterID;nsid=1821471073;c=1731785462519) 2024-11-16T19:31:03,175 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x812b73e9a6a1a5c2 with lease ID 0x498e2029b2e83b36: from storage DS-0c450d1c-45dc-4194-8417-c5d88a815909 node DatanodeRegistration(127.0.0.1:35753, datanodeUuid=6faf8100-7518-4565-aeef-57015578d340, infoPort=38877, infoSecurePort=0, ipcPort=42387, storageInfo=lv=-57;cid=testClusterID;nsid=1821471073;c=1731785462519), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T19:31:03,175 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x812b73e9a6a1a5c2 with lease ID 0x498e2029b2e83b36: Processing first storage report for DS-a82facb8-164d-46b7-8dfb-8362a8f60699 from datanode DatanodeRegistration(127.0.0.1:35753, datanodeUuid=6faf8100-7518-4565-aeef-57015578d340, infoPort=38877, infoSecurePort=0, ipcPort=42387, storageInfo=lv=-57;cid=testClusterID;nsid=1821471073;c=1731785462519) 2024-11-16T19:31:03,175 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x812b73e9a6a1a5c2 with lease ID 0x498e2029b2e83b36: from storage DS-a82facb8-164d-46b7-8dfb-8362a8f60699 node DatanodeRegistration(127.0.0.1:35753, datanodeUuid=6faf8100-7518-4565-aeef-57015578d340, infoPort=38877, infoSecurePort=0, ipcPort=42387, storageInfo=lv=-57;cid=testClusterID;nsid=1821471073;c=1731785462519), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T19:31:03,215 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec 2024-11-16T19:31:03,217 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/cluster_705530ed-101b-4d64-497c-510121ec9330/zookeeper_0, clientPort=49769, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/cluster_705530ed-101b-4d64-497c-510121ec9330/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/cluster_705530ed-101b-4d64-497c-510121ec9330/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T19:31:03,220 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49769 2024-11-16T19:31:03,220 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:31:03,221 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:31:03,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42679 is added to blk_1073741825_1001 (size=7) 2024-11-16T19:31:03,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35753 is added to blk_1073741825_1001 (size=7) 2024-11-16T19:31:03,234 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71 with version=8 2024-11-16T19:31:03,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33243/user/jenkins/test-data/508a698b-0968-760b-ef19-9a666701d43a/hbase-staging 2024-11-16T19:31:03,237 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d11ab77873cb:0 server-side Connection retries=45 2024-11-16T19:31:03,237 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:31:03,237 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T19:31:03,237 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T19:31:03,237 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:31:03,237 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T19:31:03,237 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T19:31:03,237 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T19:31:03,238 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43641 2024-11-16T19:31:03,239 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43641 connecting to ZooKeeper ensemble=127.0.0.1:49769 2024-11-16T19:31:03,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:436410x0, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T19:31:03,245 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43641-0x1004a04721f0000 connected 2024-11-16T19:31:03,269 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:31:03,270 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:31:03,274 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:31:03,274 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71, hbase.cluster.distributed=false 2024-11-16T19:31:03,276 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T19:31:03,276 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43641 2024-11-16T19:31:03,276 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43641 2024-11-16T19:31:03,277 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43641 2024-11-16T19:31:03,277 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43641 2024-11-16T19:31:03,277 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43641 2024-11-16T19:31:03,290 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d11ab77873cb:0 server-side Connection retries=45 2024-11-16T19:31:03,290 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:31:03,290 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T19:31:03,290 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T19:31:03,290 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T19:31:03,290 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T19:31:03,290 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T19:31:03,290 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T19:31:03,290 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43107 2024-11-16T19:31:03,291 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43107 connecting to ZooKeeper ensemble=127.0.0.1:49769 2024-11-16T19:31:03,292 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:31:03,294 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:31:03,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:431070x0, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T19:31:03,297 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43107-0x1004a04721f0001 connected 2024-11-16T19:31:03,297 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43107-0x1004a04721f0001, quorum=127.0.0.1:49769, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:31:03,298 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T19:31:03,302 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T19:31:03,303 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43107-0x1004a04721f0001, quorum=127.0.0.1:49769, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T19:31:03,304 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43107-0x1004a04721f0001, quorum=127.0.0.1:49769, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T19:31:03,305 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43107 2024-11-16T19:31:03,305 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43107 2024-11-16T19:31:03,305 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43107 2024-11-16T19:31:03,309 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43107 2024-11-16T19:31:03,309 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43107 
2024-11-16T19:31:03,320 DEBUG [M:0;d11ab77873cb:43641 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d11ab77873cb:43641 2024-11-16T19:31:03,321 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d11ab77873cb,43641,1731785463237 2024-11-16T19:31:03,322 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:31:03,322 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43107-0x1004a04721f0001, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:31:03,323 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d11ab77873cb,43641,1731785463237 2024-11-16T19:31:03,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:31:03,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43107-0x1004a04721f0001, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T19:31:03,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43107-0x1004a04721f0001, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:31:03,324 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T19:31:03,324 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d11ab77873cb,43641,1731785463237 from backup master directory 2024-11-16T19:31:03,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43107-0x1004a04721f0001, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:31:03,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d11ab77873cb,43641,1731785463237 2024-11-16T19:31:03,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T19:31:03,325 WARN [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-16T19:31:03,325 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d11ab77873cb,43641,1731785463237 2024-11-16T19:31:03,329 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/hbase.id] with ID: f214d950-f0eb-414e-b526-d5d9e245293b 2024-11-16T19:31:03,329 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/.tmp/hbase.id 2024-11-16T19:31:03,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35753 is added to blk_1073741826_1002 (size=42) 2024-11-16T19:31:03,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42679 is added to blk_1073741826_1002 (size=42) 2024-11-16T19:31:03,334 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/.tmp/hbase.id]:[hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/hbase.id] 2024-11-16T19:31:03,345 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:31:03,345 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T19:31:03,346 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-16T19:31:03,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:31:03,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43107-0x1004a04721f0001, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:31:03,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35753 is added to blk_1073741827_1003 (size=196) 2024-11-16T19:31:03,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42679 is added to blk_1073741827_1003 (size=196) 2024-11-16T19:31:03,356 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T19:31:03,356 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T19:31:03,357 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T19:31:03,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35753 is added to blk_1073741828_1004 (size=1189) 2024-11-16T19:31:03,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42679 is added to blk_1073741828_1004 (size=1189) 2024-11-16T19:31:03,364 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store 2024-11-16T19:31:03,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35753 is added to blk_1073741829_1005 (size=34) 2024-11-16T19:31:03,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42679 is added to blk_1073741829_1005 (size=34) 2024-11-16T19:31:03,370 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:31:03,371 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T19:31:03,371 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:31:03,371 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:31:03,371 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T19:31:03,371 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:31:03,371 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T19:31:03,371 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731785463371Disabling compacts and flushes for region at 1731785463371Disabling writes for close at 1731785463371Writing region close event to WAL at 1731785463371Closed at 1731785463371 2024-11-16T19:31:03,372 WARN [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/.initializing 2024-11-16T19:31:03,372 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/WALs/d11ab77873cb,43641,1731785463237 2024-11-16T19:31:03,374 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d11ab77873cb%2C43641%2C1731785463237, suffix=, logDir=hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/WALs/d11ab77873cb,43641,1731785463237, archiveDir=hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/oldWALs, maxLogs=10 2024-11-16T19:31:03,374 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C43641%2C1731785463237.1731785463374 2024-11-16T19:31:03,379 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/WALs/d11ab77873cb,43641,1731785463237/d11ab77873cb%2C43641%2C1731785463237.1731785463374 2024-11-16T19:31:03,379 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40223:40223),(127.0.0.1/127.0.0.1:38877:38877)] 2024-11-16T19:31:03,380 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T19:31:03,380 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:31:03,380 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:31:03,380 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:31:03,381 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:31:03,383 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T19:31:03,383 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:31:03,383 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:31:03,383 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:31:03,384 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T19:31:03,384 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:31:03,385 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:31:03,385 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:31:03,386 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T19:31:03,386 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:31:03,386 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:31:03,386 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:31:03,387 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T19:31:03,387 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:31:03,388 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T19:31:03,388 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:31:03,389 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:31:03,389 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:31:03,390 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:31:03,390 DEBUG [master/d11ab77873cb:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:31:03,390 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T19:31:03,391 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T19:31:03,393 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T19:31:03,394 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=786564, jitterRate=1.685023307800293E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T19:31:03,395 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731785463380Initializing all the Stores at 1731785463381 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785463381Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785463381Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785463381Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785463381Cleaning up temporary data from old regions at 1731785463390 (+9 ms)Region opened successfully at 1731785463394 (+4 ms) 2024-11-16T19:31:03,395 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T19:31:03,397 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@260f0449, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d11ab77873cb/172.17.0.2:0 2024-11-16T19:31:03,398 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T19:31:03,399 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T19:31:03,399 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T19:31:03,399 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T19:31:03,399 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T19:31:03,400 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T19:31:03,400 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T19:31:03,404 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T19:31:03,404 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T19:31:03,405 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T19:31:03,405 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T19:31:03,406 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T19:31:03,407 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T19:31:03,407 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T19:31:03,408 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T19:31:03,408 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T19:31:03,409 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T19:31:03,410 DEBUG 
[master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T19:31:03,412 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T19:31:03,412 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T19:31:03,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T19:31:03,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43107-0x1004a04721f0001, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T19:31:03,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43107-0x1004a04721f0001, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:31:03,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:31:03,414 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d11ab77873cb,43641,1731785463237, sessionid=0x1004a04721f0000, setting cluster-up flag (Was=false) 2024-11-16T19:31:03,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:31:03,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43107-0x1004a04721f0001, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:31:03,418 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T19:31:03,419 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d11ab77873cb,43641,1731785463237 2024-11-16T19:31:03,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43107-0x1004a04721f0001, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:31:03,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:31:03,423 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T19:31:03,424 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d11ab77873cb,43641,1731785463237 2024-11-16T19:31:03,425 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T19:31:03,427 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T19:31:03,427 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T19:31:03,428 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T19:31:03,428 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d11ab77873cb,43641,1731785463237 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T19:31:03,429 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:31:03,429 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:31:03,429 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:31:03,429 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d11ab77873cb:0, corePoolSize=5, maxPoolSize=5 2024-11-16T19:31:03,429 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d11ab77873cb:0, corePoolSize=10, maxPoolSize=10 2024-11-16T19:31:03,429 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:31:03,430 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d11ab77873cb:0, corePoolSize=2, maxPoolSize=2 2024-11-16T19:31:03,430 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d11ab77873cb:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T19:31:03,432 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T19:31:03,432 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731785493432 2024-11-16T19:31:03,432 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T19:31:03,432 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T19:31:03,433 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T19:31:03,433 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T19:31:03,433 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T19:31:03,433 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T19:31:03,433 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T19:31:03,433 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:31:03,434 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T19:31:03,437 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T19:31:03,438 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T19:31:03,438 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T19:31:03,438 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T19:31:03,438 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T19:31:03,439 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T19:31:03,439 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.large.0-1731785463439,5,FailOnTimeoutGroup] 2024-11-16T19:31:03,441 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.small.0-1731785463439,5,FailOnTimeoutGroup] 2024-11-16T19:31:03,441 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T19:31:03,441 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T19:31:03,441 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T19:31:03,441 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
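The cleaner chores registered above (LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, SnapshotCleaner every 1800000 ms) are simply periodic tasks run by the master's ChoreService. A minimal sketch of that scheduling pattern using plain java.util.concurrent, with periods taken from the log; HBase's own ChoreService internals are not reproduced here.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreScheduleSketch {
    public static void main(String[] args) {
        // Two worker threads standing in for the master's chore pool.
        ScheduledExecutorService chorePool = Executors.newScheduledThreadPool(2);
        // LogsCleaner: period=600000 ms in the log above.
        chorePool.scheduleAtFixedRate(() -> System.out.println("LogsCleaner pass"),
            0, 600_000, TimeUnit.MILLISECONDS);
        // SnapshotCleaner: period=1800000 ms in the log above.
        chorePool.scheduleAtFixedRate(() -> System.out.println("SnapshotCleaner pass"),
            0, 1_800_000, TimeUnit.MILLISECONDS);
    }
}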
2024-11-16T19:31:03,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42679 is added to blk_1073741831_1007 (size=1321) 2024-11-16T19:31:03,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35753 is added to blk_1073741831_1007 (size=1321) 2024-11-16T19:31:03,447 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T19:31:03,448 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71 2024-11-16T19:31:03,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42679 is added to blk_1073741832_1008 (size=32) 2024-11-16T19:31:03,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35753 is added to blk_1073741832_1008 (size=32) 2024-11-16T19:31:03,455 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:31:03,457 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T19:31:03,459 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T19:31:03,459 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:31:03,459 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:31:03,459 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T19:31:03,461 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T19:31:03,461 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:31:03,462 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:31:03,462 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T19:31:03,463 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T19:31:03,463 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:31:03,464 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:31:03,464 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T19:31:03,465 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T19:31:03,465 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:31:03,466 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:31:03,466 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T19:31:03,469 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/data/hbase/meta/1588230740 2024-11-16T19:31:03,474 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/data/hbase/meta/1588230740 2024-11-16T19:31:03,475 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T19:31:03,475 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T19:31:03,476 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
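The hbase:meta table descriptor written a few lines above (families info, ns, rep_barrier and table, each with BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', an 8 KB block size, plus the MultiRowMutationEndpoint coprocessor) is assembled internally by the master. A hedged sketch of how an equivalent descriptor for an ordinary user table could be built with the public hbase-client builders; the table name "demo" is made up for illustration and only the 'info'-style family is shown.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
    public static TableDescriptor build() throws IOException {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            // Same endpoint class the log shows being loaded from the hbase:meta HTD.
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8192)                                   // BLOCKSIZE => 8 KB
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
                .build())
            .build();
    }
}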
2024-11-16T19:31:03,477 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T19:31:03,479 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T19:31:03,480 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=868058, jitterRate=0.10379324853420258}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T19:31:03,481 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731785463456Initializing all the Stores at 1731785463456Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785463457 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785463457Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785463457Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785463457Cleaning up temporary data from old regions at 1731785463475 (+18 ms)Region opened successfully at 1731785463481 (+6 ms) 2024-11-16T19:31:03,481 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T19:31:03,481 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T19:31:03,481 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T19:31:03,481 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T19:31:03,481 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T19:31:03,482 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T19:31:03,482 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731785463481Disabling compacts and flushes for region at 1731785463481Disabling writes for close at 1731785463481Writing region close 
event to WAL at 1731785463482 (+1 ms)Closed at 1731785463482 2024-11-16T19:31:03,483 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T19:31:03,483 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T19:31:03,483 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T19:31:03,485 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T19:31:03,485 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T19:31:03,511 INFO [RS:0;d11ab77873cb:43107 {}] regionserver.HRegionServer(746): ClusterId : f214d950-f0eb-414e-b526-d5d9e245293b 2024-11-16T19:31:03,511 DEBUG [RS:0;d11ab77873cb:43107 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T19:31:03,513 DEBUG [RS:0;d11ab77873cb:43107 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T19:31:03,513 DEBUG [RS:0;d11ab77873cb:43107 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T19:31:03,514 DEBUG [RS:0;d11ab77873cb:43107 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T19:31:03,514 DEBUG [RS:0;d11ab77873cb:43107 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d6ebd44, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d11ab77873cb/172.17.0.2:0 2024-11-16T19:31:03,525 DEBUG [RS:0;d11ab77873cb:43107 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d11ab77873cb:43107 2024-11-16T19:31:03,525 INFO [RS:0;d11ab77873cb:43107 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T19:31:03,525 INFO [RS:0;d11ab77873cb:43107 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T19:31:03,525 DEBUG [RS:0;d11ab77873cb:43107 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-16T19:31:03,526 INFO [RS:0;d11ab77873cb:43107 {}] regionserver.HRegionServer(2659): reportForDuty to master=d11ab77873cb,43641,1731785463237 with port=43107, startcode=1731785463289 2024-11-16T19:31:03,526 DEBUG [RS:0;d11ab77873cb:43107 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T19:31:03,528 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55745, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T19:31:03,529 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43641 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d11ab77873cb,43107,1731785463289 2024-11-16T19:31:03,529 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43641 {}] master.ServerManager(517): Registering regionserver=d11ab77873cb,43107,1731785463289 2024-11-16T19:31:03,530 DEBUG [RS:0;d11ab77873cb:43107 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71 2024-11-16T19:31:03,530 DEBUG [RS:0;d11ab77873cb:43107 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39065 2024-11-16T19:31:03,530 DEBUG [RS:0;d11ab77873cb:43107 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T19:31:03,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T19:31:03,532 DEBUG [RS:0;d11ab77873cb:43107 {}] zookeeper.ZKUtil(111): regionserver:43107-0x1004a04721f0001, quorum=127.0.0.1:49769, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d11ab77873cb,43107,1731785463289 2024-11-16T19:31:03,532 WARN [RS:0;d11ab77873cb:43107 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T19:31:03,532 INFO [RS:0;d11ab77873cb:43107 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T19:31:03,532 DEBUG [RS:0;d11ab77873cb:43107 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/WALs/d11ab77873cb,43107,1731785463289 2024-11-16T19:31:03,534 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d11ab77873cb,43107,1731785463289] 2024-11-16T19:31:03,540 INFO [RS:0;d11ab77873cb:43107 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T19:31:03,544 INFO [RS:0;d11ab77873cb:43107 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T19:31:03,545 INFO [RS:0;d11ab77873cb:43107 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T19:31:03,545 INFO [RS:0;d11ab77873cb:43107 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
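The MemStoreFlusher limits reported above (globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. 95% of the limit) are derived from the region server heap and two site properties. A small sketch of reading those properties; the key names and defaults below are quoted from memory of the standard hbase-default settings and should be treated as assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key: fraction of the RS heap usable by all memstores.
        float global = conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Assumed key: low-water mark as a fraction of the global limit.
        float lowMark = conf.getFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        long heapBytes = Runtime.getRuntime().maxMemory();
        System.out.printf("globalMemStoreLimit=%d bytes, lowMark=%d bytes%n",
            (long) (heapBytes * global), (long) (heapBytes * global * lowMark));
    }
}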
2024-11-16T19:31:03,546 INFO [RS:0;d11ab77873cb:43107 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T19:31:03,547 INFO [RS:0;d11ab77873cb:43107 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T19:31:03,547 INFO [RS:0;d11ab77873cb:43107 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T19:31:03,547 DEBUG [RS:0;d11ab77873cb:43107 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:31:03,547 DEBUG [RS:0;d11ab77873cb:43107 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:31:03,547 DEBUG [RS:0;d11ab77873cb:43107 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:31:03,547 DEBUG [RS:0;d11ab77873cb:43107 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:31:03,547 DEBUG [RS:0;d11ab77873cb:43107 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:31:03,547 DEBUG [RS:0;d11ab77873cb:43107 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d11ab77873cb:0, corePoolSize=2, maxPoolSize=2 2024-11-16T19:31:03,547 DEBUG [RS:0;d11ab77873cb:43107 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:31:03,547 DEBUG [RS:0;d11ab77873cb:43107 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:31:03,547 DEBUG [RS:0;d11ab77873cb:43107 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:31:03,547 DEBUG [RS:0;d11ab77873cb:43107 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:31:03,547 DEBUG [RS:0;d11ab77873cb:43107 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:31:03,547 DEBUG [RS:0;d11ab77873cb:43107 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d11ab77873cb:0, corePoolSize=1, maxPoolSize=1 2024-11-16T19:31:03,548 DEBUG [RS:0;d11ab77873cb:43107 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d11ab77873cb:0, corePoolSize=3, maxPoolSize=3 2024-11-16T19:31:03,548 DEBUG [RS:0;d11ab77873cb:43107 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d11ab77873cb:0, corePoolSize=3, maxPoolSize=3 2024-11-16T19:31:03,548 INFO [RS:0;d11ab77873cb:43107 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
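Each "Starting executor service" line above is a named, bounded thread pool with the logged corePoolSize/maxPoolSize (RS_OPEN_REGION 1/1, RS_LOG_REPLAY_OPS 2/2, RS_SNAPSHOT_OPERATIONS 3/3, and so on). A plain-Java sketch of one such bounded pool, sized like RS_LOG_REPLAY_OPS; HBase's own ExecutorService wrapper is not reproduced.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedExecutorSketch {
    public static void main(String[] args) {
        // corePoolSize=2, maxPoolSize=2, mirroring RS_LOG_REPLAY_OPS in the log.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            2, 2, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        pool.submit(() -> System.out.println("replay task"));
        pool.shutdown();
    }
}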
2024-11-16T19:31:03,548 INFO [RS:0;d11ab77873cb:43107 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T19:31:03,548 INFO [RS:0;d11ab77873cb:43107 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:31:03,548 INFO [RS:0;d11ab77873cb:43107 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T19:31:03,548 INFO [RS:0;d11ab77873cb:43107 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T19:31:03,548 INFO [RS:0;d11ab77873cb:43107 {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,43107,1731785463289-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T19:31:03,571 INFO [RS:0;d11ab77873cb:43107 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T19:31:03,571 INFO [RS:0;d11ab77873cb:43107 {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,43107,1731785463289-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:31:03,571 INFO [RS:0;d11ab77873cb:43107 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:31:03,571 INFO [RS:0;d11ab77873cb:43107 {}] regionserver.Replication(171): d11ab77873cb,43107,1731785463289 started 2024-11-16T19:31:03,584 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:31:03,585 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:31:03,592 INFO [RS:0;d11ab77873cb:43107 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
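The two WARNs above come from RecoverLeaseFSUtils reflectively probing isFileClosed after the underlying DFSClient had already been shut down (hence the "Filesystem closed" cause); the WAL paths point at namenode port 41599, apparently left over from an earlier mini-cluster in the same JVM rather than the 39065 cluster being started here. A simplified sketch of the underlying lease-recovery pattern against a DistributedFileSystem, with a hypothetical path; the real utility adds reflection, timeouts and backoff not shown here.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical namenode and WAL path, for illustration only.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:39065"), conf);
        Path wal = new Path("/user/jenkins/example-wal");
        if (fs instanceof DistributedFileSystem) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            boolean recovered = dfs.recoverLease(wal);     // ask the NN to start lease recovery
            while (!recovered && !dfs.isFileClosed(wal)) { // poll until the file is closed
                Thread.sleep(1000);
                recovered = dfs.recoverLease(wal);
            }
        }
        fs.close();
    }
}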
2024-11-16T19:31:03,592 INFO [RS:0;d11ab77873cb:43107 {}] regionserver.HRegionServer(1482): Serving as d11ab77873cb,43107,1731785463289, RpcServer on d11ab77873cb/172.17.0.2:43107, sessionid=0x1004a04721f0001 2024-11-16T19:31:03,593 DEBUG [RS:0;d11ab77873cb:43107 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T19:31:03,593 DEBUG [RS:0;d11ab77873cb:43107 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d11ab77873cb,43107,1731785463289 2024-11-16T19:31:03,593 DEBUG [RS:0;d11ab77873cb:43107 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd11ab77873cb,43107,1731785463289' 2024-11-16T19:31:03,593 DEBUG [RS:0;d11ab77873cb:43107 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T19:31:03,593 DEBUG [RS:0;d11ab77873cb:43107 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T19:31:03,594 DEBUG [RS:0;d11ab77873cb:43107 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T19:31:03,594 DEBUG [RS:0;d11ab77873cb:43107 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T19:31:03,594 DEBUG [RS:0;d11ab77873cb:43107 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d11ab77873cb,43107,1731785463289 2024-11-16T19:31:03,594 DEBUG [RS:0;d11ab77873cb:43107 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd11ab77873cb,43107,1731785463289' 2024-11-16T19:31:03,594 DEBUG [RS:0;d11ab77873cb:43107 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T19:31:03,594 DEBUG [RS:0;d11ab77873cb:43107 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T19:31:03,594 DEBUG [RS:0;d11ab77873cb:43107 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T19:31:03,594 INFO [RS:0;d11ab77873cb:43107 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T19:31:03,595 INFO [RS:0;d11ab77873cb:43107 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T19:31:03,636 WARN [d11ab77873cb:43641 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
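The procedure members above check and watch well-known znodes ('/hbase/flush-table-proc/acquired', '/hbase/online-snapshot/acquired', and the matching abort nodes) on the quorum at 127.0.0.1:49769. A bare-bones sketch of that watch pattern with the plain ZooKeeper client; HBase's RecoverableZooKeeper/ZKWatcher layers are left out.

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ProcedureZnodeWatchSketch {
    public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
            // Fires for e.g. NodeChildrenChanged on the watched path, as seen in the log.
            System.out.println("event " + event.getType() + " on " + event.getPath());
        ZooKeeper zk = new ZooKeeper("127.0.0.1:49769", 30_000, watcher);
        // Lists currently acquired procedures and re-registers a children watch.
        List<String> acquired = zk.getChildren("/hbase/flush-table-proc/acquired", true);
        System.out.println("acquired procedures: " + acquired);
        zk.close();
    }
}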
2024-11-16T19:31:03,696 INFO [RS:0;d11ab77873cb:43107 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d11ab77873cb%2C43107%2C1731785463289, suffix=, logDir=hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/WALs/d11ab77873cb,43107,1731785463289, archiveDir=hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/oldWALs, maxLogs=32 2024-11-16T19:31:03,697 INFO [RS:0;d11ab77873cb:43107 {}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C43107%2C1731785463289.1731785463697 2024-11-16T19:31:03,708 INFO [RS:0;d11ab77873cb:43107 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/WALs/d11ab77873cb,43107,1731785463289/d11ab77873cb%2C43107%2C1731785463289.1731785463697 2024-11-16T19:31:03,713 DEBUG [RS:0;d11ab77873cb:43107 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40223:40223),(127.0.0.1/127.0.0.1:38877:38877)] 2024-11-16T19:31:03,886 DEBUG [d11ab77873cb:43641 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T19:31:03,887 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d11ab77873cb,43107,1731785463289 2024-11-16T19:31:03,888 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d11ab77873cb,43107,1731785463289, state=OPENING 2024-11-16T19:31:03,889 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T19:31:03,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:31:03,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43107-0x1004a04721f0001, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:31:03,891 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T19:31:03,891 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=d11ab77873cb,43107,1731785463289}] 2024-11-16T19:31:03,893 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:31:03,893 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:31:04,045 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T19:31:04,047 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52451, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T19:31:04,051 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T19:31:04,051 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T19:31:04,052 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d11ab77873cb%2C43107%2C1731785463289.meta, suffix=.meta, logDir=hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/WALs/d11ab77873cb,43107,1731785463289, archiveDir=hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/oldWALs, maxLogs=32 2024-11-16T19:31:04,053 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor d11ab77873cb%2C43107%2C1731785463289.meta.1731785464053.meta 2024-11-16T19:31:04,064 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/WALs/d11ab77873cb,43107,1731785463289/d11ab77873cb%2C43107%2C1731785463289.meta.1731785464053.meta 2024-11-16T19:31:04,067 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38877:38877),(127.0.0.1/127.0.0.1:40223:40223)] 2024-11-16T19:31:04,079 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T19:31:04,079 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T19:31:04,079 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T19:31:04,079 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-16T19:31:04,079 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T19:31:04,079 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T19:31:04,079 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T19:31:04,079 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T19:31:04,081 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T19:31:04,082 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T19:31:04,082 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:31:04,082 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:31:04,082 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T19:31:04,083 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T19:31:04,083 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:31:04,084 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:31:04,084 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T19:31:04,085 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T19:31:04,085 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:31:04,085 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T19:31:04,085 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T19:31:04,086 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T19:31:04,086 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T19:31:04,086 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
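The CompactionConfiguration lines above report the effective compaction tuning for each family of region 1588230740 (minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2, off-peak ratio 5.0). These values come from site configuration; a sketch of overriding a few of them, with the property names given as assumptions based on the standard hbase-site compaction keys.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key names for the values reported by CompactionConfiguration above.
        conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // selection ratio
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
        System.out.println("ratio=" + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
    }
}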
2024-11-16T19:31:04,086 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T19:31:04,087 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/data/hbase/meta/1588230740 2024-11-16T19:31:04,088 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/data/hbase/meta/1588230740 2024-11-16T19:31:04,089 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T19:31:04,089 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T19:31:04,089 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T19:31:04,091 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T19:31:04,092 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=847875, jitterRate=0.07812920212745667}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T19:31:04,092 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T19:31:04,093 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731785464079Writing region info on filesystem at 1731785464079Initializing all the Stores at 1731785464080 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785464080Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785464081 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731785464081Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731785464081Cleaning up temporary data from old regions at 1731785464089 (+8 ms)Running coprocessor post-open hooks at 1731785464092 (+3 ms)Region opened successfully at 1731785464093 (+1 ms) 2024-11-16T19:31:04,094 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731785464045 2024-11-16T19:31:04,096 DEBUG [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T19:31:04,096 INFO [RS_OPEN_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T19:31:04,097 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=d11ab77873cb,43107,1731785463289 2024-11-16T19:31:04,097 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d11ab77873cb,43107,1731785463289, state=OPEN 2024-11-16T19:31:04,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T19:31:04,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43107-0x1004a04721f0001, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T19:31:04,099 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d11ab77873cb,43107,1731785463289 2024-11-16T19:31:04,100 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:31:04,100 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T19:31:04,102 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T19:31:04,102 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d11ab77873cb,43107,1731785463289 in 208 msec 2024-11-16T19:31:04,104 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T19:31:04,104 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 619 msec 2024-11-16T19:31:04,104 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T19:31:04,105 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T19:31:04,106 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T19:31:04,106 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d11ab77873cb,43107,1731785463289, seqNum=-1] 2024-11-16T19:31:04,106 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T19:31:04,108 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43763, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T19:31:04,112 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 685 msec 2024-11-16T19:31:04,112 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731785464112, completionTime=-1 2024-11-16T19:31:04,113 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T19:31:04,113 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T19:31:04,115 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T19:31:04,115 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731785524115 2024-11-16T19:31:04,115 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731785584115 2024-11-16T19:31:04,115 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-16T19:31:04,115 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,43641,1731785463237-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T19:31:04,115 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,43641,1731785463237-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:31:04,115 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,43641,1731785463237-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T19:31:04,115 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d11ab77873cb:43641, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T19:31:04,115 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T19:31:04,116 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T19:31:04,117 DEBUG [master/d11ab77873cb:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T19:31:04,120 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.795sec 2024-11-16T19:31:04,120 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T19:31:04,121 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T19:31:04,121 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T19:31:04,121 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T19:31:04,121 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T19:31:04,121 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,43641,1731785463237-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T19:31:04,121 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,43641,1731785463237-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T19:31:04,125 DEBUG [master/d11ab77873cb:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T19:31:04,125 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T19:31:04,125 INFO [master/d11ab77873cb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d11ab77873cb,43641,1731785463237-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T19:31:04,211 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ef29692, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T19:31:04,212 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d11ab77873cb,43641,-1 for getting cluster id 2024-11-16T19:31:04,212 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T19:31:04,213 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f214d950-f0eb-414e-b526-d5d9e245293b' 2024-11-16T19:31:04,213 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T19:31:04,213 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f214d950-f0eb-414e-b526-d5d9e245293b" 2024-11-16T19:31:04,214 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@770fcd5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T19:31:04,214 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d11ab77873cb,43641,-1] 2024-11-16T19:31:04,214 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T19:31:04,214 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:31:04,215 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55314, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T19:31:04,216 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20dd8a9c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T19:31:04,216 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T19:31:04,217 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d11ab77873cb,43107,1731785463289, seqNum=-1] 2024-11-16T19:31:04,217 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T19:31:04,218 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43656, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T19:31:04,220 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=d11ab77873cb,43641,1731785463237 2024-11-16T19:31:04,220 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T19:31:04,223 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T19:31:04,223 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T19:31:04,225 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/WALs/test.com,8080,1, archiveDir=hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/oldWALs, maxLogs=32 2024-11-16T19:31:04,226 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731785464226 2024-11-16T19:31:04,231 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/WALs/test.com,8080,1/test.com%2C8080%2C1.1731785464226 2024-11-16T19:31:04,232 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40223:40223),(127.0.0.1/127.0.0.1:38877:38877)] 2024-11-16T19:31:04,233 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731785464233 2024-11-16T19:31:04,237 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,237 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,237 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,237 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,237 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,237 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/WALs/test.com,8080,1/test.com%2C8080%2C1.1731785464226 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/WALs/test.com,8080,1/test.com%2C8080%2C1.1731785464233 2024-11-16T19:31:04,239 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38877:38877),(127.0.0.1/127.0.0.1:40223:40223)] 2024-11-16T19:31:04,239 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/WALs/test.com,8080,1/test.com%2C8080%2C1.1731785464226 is not closed yet, will try archiving it next time 2024-11-16T19:31:04,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35753 is added to blk_1073741835_1011 (size=93) 2024-11-16T19:31:04,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42679 is added to blk_1073741835_1011 (size=93) 2024-11-16T19:31:04,239 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,239 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,239 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,239 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,239 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,240 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/WALs/test.com,8080,1/test.com%2C8080%2C1.1731785464226 to hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/oldWALs/test.com%2C8080%2C1.1731785464226 2024-11-16T19:31:04,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42679 is added to blk_1073741836_1012 (size=93) 2024-11-16T19:31:04,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35753 is added to blk_1073741836_1012 (size=93) 2024-11-16T19:31:04,243 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/oldWALs 2024-11-16T19:31:04,243 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731785464233) 2024-11-16T19:31:04,243 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T19:31:04,243 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T19:31:04,243 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T19:31:04,243 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:31:04,243 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:31:04,243 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T19:31:04,244 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T19:31:04,244 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1523024428, stopped=false 2024-11-16T19:31:04,244 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d11ab77873cb,43641,1731785463237 2024-11-16T19:31:04,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T19:31:04,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43107-0x1004a04721f0001, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T19:31:04,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:31:04,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43107-0x1004a04721f0001, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:31:04,245 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T19:31:04,245 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T19:31:04,245 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T19:31:04,245 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:31:04,245 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'd11ab77873cb,43107,1731785463289' ***** 2024-11-16T19:31:04,245 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T19:31:04,246 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43107-0x1004a04721f0001, quorum=127.0.0.1:49769, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:31:04,249 INFO [RS:0;d11ab77873cb:43107 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T19:31:04,249 INFO [RS:0;d11ab77873cb:43107 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T19:31:04,249 INFO [RS:0;d11ab77873cb:43107 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T19:31:04,249 INFO [RS:0;d11ab77873cb:43107 {}] regionserver.HRegionServer(959): stopping server d11ab77873cb,43107,1731785463289 2024-11-16T19:31:04,249 INFO [RS:0;d11ab77873cb:43107 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T19:31:04,249 INFO [RS:0;d11ab77873cb:43107 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d11ab77873cb:43107. 2024-11-16T19:31:04,249 DEBUG [RS:0;d11ab77873cb:43107 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T19:31:04,249 DEBUG [RS:0;d11ab77873cb:43107 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:31:04,250 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T19:31:04,250 INFO [RS:0;d11ab77873cb:43107 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T19:31:04,250 INFO [RS:0;d11ab77873cb:43107 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T19:31:04,250 INFO [RS:0;d11ab77873cb:43107 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-16T19:31:04,250 INFO [RS:0;d11ab77873cb:43107 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T19:31:04,250 INFO [RS:0;d11ab77873cb:43107 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-16T19:31:04,250 DEBUG [RS:0;d11ab77873cb:43107 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-16T19:31:04,250 DEBUG [RS:0;d11ab77873cb:43107 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-16T19:31:04,250 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T19:31:04,250 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T19:31:04,250 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T19:31:04,250 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T19:31:04,250 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T19:31:04,251 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-16T19:31:04,252 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T19:31:04,267 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/data/hbase/meta/1588230740/.tmp/ns/a78e09365ce745be96dc2c8824c8fd1a is 43, key is default/ns:d/1731785464108/Put/seqid=0 2024-11-16T19:31:04,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35753 is added to blk_1073741837_1013 (size=5153) 2024-11-16T19:31:04,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42679 is added to blk_1073741837_1013 (size=5153) 2024-11-16T19:31:04,274 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/data/hbase/meta/1588230740/.tmp/ns/a78e09365ce745be96dc2c8824c8fd1a 2024-11-16T19:31:04,280 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/data/hbase/meta/1588230740/.tmp/ns/a78e09365ce745be96dc2c8824c8fd1a as hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/data/hbase/meta/1588230740/ns/a78e09365ce745be96dc2c8824c8fd1a 2024-11-16T19:31:04,285 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/data/hbase/meta/1588230740/ns/a78e09365ce745be96dc2c8824c8fd1a, entries=2, sequenceid=6, filesize=5.0 K 2024-11-16T19:31:04,286 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 36ms, sequenceid=6, compaction requested=false 2024-11-16T19:31:04,286 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T19:31:04,298 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T19:31:04,298 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T19:31:04,298 INFO [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T19:31:04,298 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731785464250Running coprocessor pre-close hooks at 1731785464250Disabling compacts and flushes for region at 1731785464250Disabling writes for close at 1731785464250Obtaining lock to block concurrent updates at 1731785464251 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731785464251Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731785464251Flushing stores of hbase:meta,,1.1588230740 at 1731785464251Flushing 1588230740/ns: creating writer at 1731785464251Flushing 1588230740/ns: appending metadata at 1731785464267 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731785464267Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3cb8bf8d: reopening flushed file at 1731785464279 (+12 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 36ms, sequenceid=6, compaction requested=false at 1731785464286 (+7 ms)Writing region close event to WAL at 1731785464291 (+5 ms)Running coprocessor post-close hooks at 1731785464298 (+7 ms)Closed at 1731785464298 2024-11-16T19:31:04,299 DEBUG [RS_CLOSE_META-regionserver/d11ab77873cb:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T19:31:04,450 INFO [RS:0;d11ab77873cb:43107 {}] regionserver.HRegionServer(976): stopping server d11ab77873cb,43107,1731785463289; all regions closed. 
2024-11-16T19:31:04,451 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,451 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,452 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,452 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,452 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42679 is added to blk_1073741834_1010 (size=1152) 2024-11-16T19:31:04,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35753 is added to blk_1073741834_1010 (size=1152) 2024-11-16T19:31:04,459 DEBUG [RS:0;d11ab77873cb:43107 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/oldWALs 2024-11-16T19:31:04,459 INFO [RS:0;d11ab77873cb:43107 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d11ab77873cb%2C43107%2C1731785463289.meta:.meta(num 1731785464053) 2024-11-16T19:31:04,460 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,460 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,460 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,460 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,460 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35753 is added to blk_1073741833_1009 (size=93) 2024-11-16T19:31:04,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42679 is added to blk_1073741833_1009 (size=93) 2024-11-16T19:31:04,467 DEBUG [RS:0;d11ab77873cb:43107 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/oldWALs 2024-11-16T19:31:04,467 INFO [RS:0;d11ab77873cb:43107 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog d11ab77873cb%2C43107%2C1731785463289:(num 1731785463697) 2024-11-16T19:31:04,467 DEBUG [RS:0;d11ab77873cb:43107 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T19:31:04,467 INFO [RS:0;d11ab77873cb:43107 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T19:31:04,467 INFO [RS:0;d11ab77873cb:43107 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T19:31:04,468 INFO [RS:0;d11ab77873cb:43107 {}] hbase.ChoreService(370): Chore service for: regionserver/d11ab77873cb:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T19:31:04,468 INFO [RS:0;d11ab77873cb:43107 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T19:31:04,468 INFO [regionserver/d11ab77873cb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T19:31:04,468 INFO [RS:0;d11ab77873cb:43107 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43107 2024-11-16T19:31:04,471 INFO [RS:0;d11ab77873cb:43107 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T19:31:04,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T19:31:04,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43107-0x1004a04721f0001, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d11ab77873cb,43107,1731785463289 2024-11-16T19:31:04,472 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d11ab77873cb,43107,1731785463289] 2024-11-16T19:31:04,473 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d11ab77873cb,43107,1731785463289 already deleted, retry=false 2024-11-16T19:31:04,473 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d11ab77873cb,43107,1731785463289 expired; onlineServers=0 2024-11-16T19:31:04,473 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd11ab77873cb,43641,1731785463237' ***** 2024-11-16T19:31:04,473 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T19:31:04,473 INFO [M:0;d11ab77873cb:43641 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T19:31:04,473 INFO [M:0;d11ab77873cb:43641 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T19:31:04,473 DEBUG [M:0;d11ab77873cb:43641 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T19:31:04,473 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T19:31:04,473 DEBUG [M:0;d11ab77873cb:43641 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T19:31:04,473 DEBUG [master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.small.0-1731785463439 {}] cleaner.HFileCleaner(306): Exit Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.small.0-1731785463439,5,FailOnTimeoutGroup] 2024-11-16T19:31:04,473 DEBUG [master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.large.0-1731785463439 {}] cleaner.HFileCleaner(306): Exit Thread[master/d11ab77873cb:0:becomeActiveMaster-HFileCleaner.large.0-1731785463439,5,FailOnTimeoutGroup] 2024-11-16T19:31:04,474 INFO [M:0;d11ab77873cb:43641 {}] hbase.ChoreService(370): Chore service for: master/d11ab77873cb:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T19:31:04,474 INFO [M:0;d11ab77873cb:43641 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T19:31:04,474 DEBUG [M:0;d11ab77873cb:43641 {}] master.HMaster(1795): Stopping service threads 2024-11-16T19:31:04,474 INFO [M:0;d11ab77873cb:43641 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T19:31:04,474 INFO [M:0;d11ab77873cb:43641 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T19:31:04,474 INFO [M:0;d11ab77873cb:43641 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T19:31:04,474 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T19:31:04,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T19:31:04,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T19:31:04,474 DEBUG [M:0;d11ab77873cb:43641 {}] zookeeper.ZKUtil(347): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T19:31:04,475 WARN [M:0;d11ab77873cb:43641 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T19:31:04,475 INFO [M:0;d11ab77873cb:43641 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/.lastflushedseqids 2024-11-16T19:31:04,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42679 is added to blk_1073741838_1014 (size=99) 2024-11-16T19:31:04,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35753 is added to blk_1073741838_1014 (size=99) 2024-11-16T19:31:04,480 INFO [M:0;d11ab77873cb:43641 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T19:31:04,480 INFO [M:0;d11ab77873cb:43641 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T19:31:04,480 DEBUG [M:0;d11ab77873cb:43641 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T19:31:04,480 INFO [M:0;d11ab77873cb:43641 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:31:04,480 DEBUG [M:0;d11ab77873cb:43641 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:31:04,480 DEBUG [M:0;d11ab77873cb:43641 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T19:31:04,480 DEBUG [M:0;d11ab77873cb:43641 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:31:04,480 INFO [M:0;d11ab77873cb:43641 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-16T19:31:04,496 DEBUG [M:0;d11ab77873cb:43641 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4dc9b9a255fd46c5a2a4bde3aa4725d5 is 82, key is hbase:meta,,1/info:regioninfo/1731785464096/Put/seqid=0 2024-11-16T19:31:04,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35753 is added to blk_1073741839_1015 (size=5672) 2024-11-16T19:31:04,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42679 is added to blk_1073741839_1015 (size=5672) 2024-11-16T19:31:04,500 INFO [M:0;d11ab77873cb:43641 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4dc9b9a255fd46c5a2a4bde3aa4725d5 2024-11-16T19:31:04,516 DEBUG [M:0;d11ab77873cb:43641 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1c4bbdfb1c284e4d86560a3a43972c6d is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731785464112/Put/seqid=0 2024-11-16T19:31:04,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42679 is added to blk_1073741840_1016 (size=5275) 2024-11-16T19:31:04,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35753 is added to blk_1073741840_1016 (size=5275) 2024-11-16T19:31:04,521 INFO [M:0;d11ab77873cb:43641 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1c4bbdfb1c284e4d86560a3a43972c6d 2024-11-16T19:31:04,540 DEBUG [M:0;d11ab77873cb:43641 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e6c8599ba5604825b24ce7510eebbee3 is 69, key is d11ab77873cb,43107,1731785463289/rs:state/1731785463529/Put/seqid=0 2024-11-16T19:31:04,543 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35753 is added to blk_1073741841_1017 (size=5156) 2024-11-16T19:31:04,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42679 is added to blk_1073741841_1017 (size=5156) 2024-11-16T19:31:04,544 INFO [M:0;d11ab77873cb:43641 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e6c8599ba5604825b24ce7510eebbee3 2024-11-16T19:31:04,560 DEBUG [M:0;d11ab77873cb:43641 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b7da786e725446838ab32fbb74a4a1b0 is 52, key is load_balancer_on/state:d/1731785464222/Put/seqid=0 2024-11-16T19:31:04,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42679 is added to blk_1073741842_1018 (size=5056) 2024-11-16T19:31:04,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35753 is added to blk_1073741842_1018 (size=5056) 2024-11-16T19:31:04,566 INFO [M:0;d11ab77873cb:43641 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b7da786e725446838ab32fbb74a4a1b0 2024-11-16T19:31:04,570 DEBUG [M:0;d11ab77873cb:43641 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4dc9b9a255fd46c5a2a4bde3aa4725d5 as hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4dc9b9a255fd46c5a2a4bde3aa4725d5 2024-11-16T19:31:04,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43107-0x1004a04721f0001, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:31:04,572 INFO [RS:0;d11ab77873cb:43107 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T19:31:04,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43107-0x1004a04721f0001, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T19:31:04,572 INFO [RS:0;d11ab77873cb:43107 {}] regionserver.HRegionServer(1031): Exiting; stopping=d11ab77873cb,43107,1731785463289; zookeeper connection closed. 
2024-11-16T19:31:04,572 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2e7eca60 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2e7eca60 2024-11-16T19:31:04,572 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T19:31:04,575 INFO [M:0;d11ab77873cb:43641 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4dc9b9a255fd46c5a2a4bde3aa4725d5, entries=8, sequenceid=29, filesize=5.5 K 2024-11-16T19:31:04,576 DEBUG [M:0;d11ab77873cb:43641 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1c4bbdfb1c284e4d86560a3a43972c6d as hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1c4bbdfb1c284e4d86560a3a43972c6d 2024-11-16T19:31:04,579 INFO [M:0;d11ab77873cb:43641 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1c4bbdfb1c284e4d86560a3a43972c6d, entries=3, sequenceid=29, filesize=5.2 K 2024-11-16T19:31:04,580 DEBUG [M:0;d11ab77873cb:43641 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e6c8599ba5604825b24ce7510eebbee3 as hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e6c8599ba5604825b24ce7510eebbee3 2024-11-16T19:31:04,584 INFO [M:0;d11ab77873cb:43641 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e6c8599ba5604825b24ce7510eebbee3, entries=1, sequenceid=29, filesize=5.0 K 2024-11-16T19:31:04,585 DEBUG [M:0;d11ab77873cb:43641 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b7da786e725446838ab32fbb74a4a1b0 as hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b7da786e725446838ab32fbb74a4a1b0 2024-11-16T19:31:04,585 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,36045,1731785272299/d11ab77873cb%2C36045%2C1731785272299.meta.1731785273336.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:31:04,585 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:41599/user/jenkins/test-data/125cb7f4-410f-1013-40dc-1a1cd05e70d4/WALs/d11ab77873cb,45785,1731785273562/d11ab77873cb%2C45785%2C1731785273562.1731785273801 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T19:31:04,589 INFO [M:0;d11ab77873cb:43641 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39065/user/jenkins/test-data/e77f7ed9-3c2d-e53b-443c-93fa69054b71/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b7da786e725446838ab32fbb74a4a1b0, entries=1, sequenceid=29, filesize=4.9 K 2024-11-16T19:31:04,590 INFO [M:0;d11ab77873cb:43641 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=29, compaction requested=false 2024-11-16T19:31:04,592 INFO [M:0;d11ab77873cb:43641 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T19:31:04,592 DEBUG [M:0;d11ab77873cb:43641 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731785464480Disabling compacts and flushes for region at 1731785464480Disabling writes for close at 1731785464480Obtaining lock to block concurrent updates at 1731785464480Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731785464480Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731785464480Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731785464481 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731785464481Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731785464495 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731785464495Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731785464504 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731785464516 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731785464516Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731785464525 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731785464539 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731785464539Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731785464548 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731785464560 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731785464560Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@29b76ecd: reopening flushed file at 1731785464570 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b686d56: reopening flushed file at 1731785464575 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76593d20: reopening flushed file at 1731785464580 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1d1b7336: reopening flushed file at 1731785464584 (+4 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=29, compaction requested=false at 1731785464590 (+6 ms)Writing region close event to WAL at 1731785464592 (+2 ms)Closed at 1731785464592 2024-11-16T19:31:04,592 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,592 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,592 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,592 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,592 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T19:31:04,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35753 is added to blk_1073741830_1006 (size=10311) 2024-11-16T19:31:04,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42679 is added to blk_1073741830_1006 (size=10311) 2024-11-16T19:31:04,595 INFO [M:0;d11ab77873cb:43641 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T19:31:04,595 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T19:31:04,595 INFO [M:0;d11ab77873cb:43641 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43641
2024-11-16T19:31:04,595 INFO [M:0;d11ab77873cb:43641 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-16T19:31:04,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-16T19:31:04,696 INFO [M:0;d11ab77873cb:43641 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-16T19:31:04,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43641-0x1004a04721f0000, quorum=127.0.0.1:49769, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-16T19:31:04,700 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2b13a29{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-16T19:31:04,700 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7b4a20b3{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-16T19:31:04,700 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-16T19:31:04,701 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4edfb46d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-16T19:31:04,701 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@637d6eb9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/hadoop.log.dir/,STOPPED}
2024-11-16T19:31:04,702 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-16T19:31:04,702 WARN [BP-2095152159-172.17.0.2-1731785462519 heartbeating to localhost/127.0.0.1:39065 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-16T19:31:04,702 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-16T19:31:04,702 WARN [BP-2095152159-172.17.0.2-1731785462519 heartbeating to localhost/127.0.0.1:39065 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2095152159-172.17.0.2-1731785462519 (Datanode Uuid 6faf8100-7518-4565-aeef-57015578d340) service to localhost/127.0.0.1:39065
2024-11-16T19:31:04,703 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/cluster_705530ed-101b-4d64-497c-510121ec9330/data/data3/current/BP-2095152159-172.17.0.2-1731785462519 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-16T19:31:04,704 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/cluster_705530ed-101b-4d64-497c-510121ec9330/data/data4/current/BP-2095152159-172.17.0.2-1731785462519 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-16T19:31:04,704 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-16T19:31:04,706 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@18478920{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-16T19:31:04,707 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@72ee40d2{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-16T19:31:04,707 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-16T19:31:04,707 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d359c98{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-16T19:31:04,707 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@76ccace4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/hadoop.log.dir/,STOPPED}
2024-11-16T19:31:04,709 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-16T19:31:04,709 WARN [BP-2095152159-172.17.0.2-1731785462519 heartbeating to localhost/127.0.0.1:39065 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-16T19:31:04,709 WARN [BP-2095152159-172.17.0.2-1731785462519 heartbeating to localhost/127.0.0.1:39065 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2095152159-172.17.0.2-1731785462519 (Datanode Uuid 1d1779f2-ca86-41a0-b714-01ed0c8077e5) service to localhost/127.0.0.1:39065
2024-11-16T19:31:04,709 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-16T19:31:04,709 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/cluster_705530ed-101b-4d64-497c-510121ec9330/data/data1/current/BP-2095152159-172.17.0.2-1731785462519 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-16T19:31:04,710 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/cluster_705530ed-101b-4d64-497c-510121ec9330/data/data2/current/BP-2095152159-172.17.0.2-1731785462519 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-16T19:31:04,710 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-16T19:31:04,716 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1e07b1d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-16T19:31:04,717 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@18bd2805{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-16T19:31:04,717 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-16T19:31:04,717 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@93c51d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-16T19:31:04,717 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@eedc0c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/28a566d9-0572-7e5b-b768-f548eac830ec/hadoop.log.dir/,STOPPED}
2024-11-16T19:31:04,723 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-16T19:31:04,736 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-16T19:31:04,743 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=270 (was 232)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39065
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39065 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:39065
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39065
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: globalEventExecutor-1-20
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113)
    app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39065 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:39065
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39065 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39065
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=540 (was 517) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=134 (was 134), ProcessCount=11 (was 11), AvailableMemoryMB=2992 (was 3179)